/drivers/crypto/intel/qat/qat_common/
adf_mstate_mgr.c
    161  strscpy(sect->id, id, sizeof(sect->id));  in adf_mstate_sect_add_header()
    166  return sect;  in adf_mstate_sect_add_header()
    176  if (!sect)  in adf_mstate_sect_add_vreg()
    188  return sect;  in adf_mstate_sect_add_vreg()
    201  if (!sect)  in adf_mstate_sect_add()
    205  return sect;  in adf_mstate_sect_add()
    222  return sect;  in adf_mstate_sect_add()
    295  if (!strncmp(sect->id, id, sizeof(sect->id)))  in adf_mstate_sect_lookup()
    298  sect = (struct adf_mstate_sect_h *)(sect->state + sect->size);  in adf_mstate_sect_lookup()
    311  return sect;  in adf_mstate_sect_lookup()
    [all …]
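
The hits in adf_mstate_sect_add_header() and adf_mstate_sect_lookup() show the usual variable-length-section pattern: a fixed-width id copied with strscpy(), and a lookup that compares ids with strncmp() and hops to the next header at `sect->state + sect->size`. Below is a minimal standalone sketch of that walk; the `mstate_sect` layout is hypothetical (the real struct adf_mstate_sect_h has more fields), only the id/size/next-at-end-of-payload shape is taken from the lines above.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical section header: fixed-width id, payload size, then payload. */
struct mstate_sect {
	char id[8];
	uint32_t size;          /* payload bytes that follow this header        */
	unsigned char state[];  /* payload; the next header starts right after */
};

/* Walk headers laid back to back in buf, matching on the fixed-width id. */
static struct mstate_sect *sect_lookup(void *buf, size_t len, const char *id)
{
	unsigned char *p = buf, *end = p + len;

	while (p + sizeof(struct mstate_sect) <= end) {
		struct mstate_sect *sect = (struct mstate_sect *)p;

		if (!strncmp(sect->id, id, sizeof(sect->id)))
			return sect;
		/* next section sits immediately after this one's payload */
		p = sect->state + sect->size;
	}
	return NULL;
}

int main(void)
{
	static uint64_t storage[32];                    /* 256 B, aligned */
	unsigned char *buf = (unsigned char *)storage;
	struct mstate_sect *s = (struct mstate_sect *)buf;

	/* add one section with an 8-byte payload */
	strncpy(s->id, "REGS", sizeof(s->id));
	s->size = 8;

	printf("found: %s\n",
	       sect_lookup(buf, sizeof(storage), "REGS") ? "yes" : "no");
	return 0;
}
```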
|
adf_mstate_mgr.h
    77   struct adf_mstate_sect_h *sect);
|
/drivers/nvme/target/
zns.c
    156  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);  in nvmet_bdev_validate_zone_mgmt_recv()  local
    159  if (sect >= get_capacity(req->ns->bdev->bd_disk)) {  in nvmet_bdev_validate_zone_mgmt_recv()
    253  unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);  in nvmet_req_nr_zones_from_slba()  local
    475  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);  in nvmet_bdev_zmgmt_send_work()  local
    494  if (sect >= get_capacity(bdev->bd_disk)) {  in nvmet_bdev_zmgmt_send_work()
    500  if (sect & (zone_sectors - 1)) {  in nvmet_bdev_zmgmt_send_work()
    506  ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors);  in nvmet_bdev_zmgmt_send_work()
    535  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);  in nvmet_bdev_execute_zone_append()  local
    560  if (sect >= get_capacity(req->ns->bdev->bd_disk)) {  in nvmet_bdev_execute_zone_append()
    566  if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {  in nvmet_bdev_execute_zone_append()
    [all …]
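
These zns.c hits validate a zone-management LBA in two steps: convert the namespace LBA to a 512-byte sector, then reject it if it is past the device capacity or not aligned to the zone size, where `sect & (zone_sectors - 1)` relies on the zone size being a power of two. A small userspace sketch of the same checks; the shift by `blk_shift - 9` is my reading of how nvmet_lba_to_sect() maps namespace blocks to 512-byte sectors, and the function and parameter names are invented for the example.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9 /* 512-byte sectors */

/* Namespace LBA -> 512-byte sector, assuming block size is a power of two. */
static uint64_t lba_to_sect(uint64_t lba, unsigned int blk_shift)
{
	return lba << (blk_shift - SECTOR_SHIFT);
}

/* Mirrors the two checks visible in nvmet_bdev_zmgmt_send_work(). */
static bool zone_slba_valid(uint64_t lba, unsigned int blk_shift,
			    uint64_t capacity_sects, uint64_t zone_sectors)
{
	uint64_t sect = lba_to_sect(lba, blk_shift);

	if (sect >= capacity_sects)
		return false;                 /* beyond end of device    */
	if (sect & (zone_sectors - 1))
		return false;                 /* not the start of a zone */
	return true;
}

int main(void)
{
	/* 4 KiB blocks, 1 GiB capacity, 256 MiB zones (all in 512 B sectors) */
	unsigned int blk_shift = 12;
	uint64_t cap = 1ULL << 21, zone = 1ULL << 19;

	printf("%d %d\n", zone_slba_valid(0, blk_shift, cap, zone),
	       zone_slba_valid(1, blk_shift, cap, zone));   /* prints: 1 0 */
	return 0;
}
```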
|
/drivers/net/ethernet/intel/ice/
ice_vlan_mode.c
    15   struct ice_meta_init_section *sect;  in ice_pkg_get_supported_vlan_mode()  local
    24   meta_init_size, (void **)&sect);  in ice_pkg_get_supported_vlan_mode()
    29   sect->count = cpu_to_le16(1);  in ice_pkg_get_supported_vlan_mode()
    30   sect->offset = cpu_to_le16(ICE_META_VLAN_MODE_ENTRY);  in ice_pkg_get_supported_vlan_mode()
    42   arr[i] = le32_to_cpu(sect->entry.bm[i]);  in ice_pkg_get_supported_vlan_mode()
|
ice_flex_pipe.c
    84   static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)  in ice_sect_id()  argument
    86   return ice_sect_lkup[blk][sect];  in ice_sect_id()
    1897 void *sect;  in ice_fill_tbl()  local
    1910 sect = ice_pkg_enum_section(hw->seg, &state, sid);  in ice_fill_tbl()
    1912 while (sect) {  in ice_fill_tbl()
    1919 xlt1 = sect;  in ice_fill_tbl()
    1932 xlt2 = sect;  in ice_fill_tbl()
    1945 pid = sect;  in ice_fill_tbl()
    1958 pr = sect;  in ice_fill_tbl()
    1971 es = sect;  in ice_fill_tbl()
    [all …]
|
ice_ddp.h
    446  void *sect;  member
|
ice_ddp.c
    260  state->sect =  in ice_pkg_enum_section()
    264  return state->sect;  in ice_pkg_enum_section()
    317  entry = state->handler(state->sect_type, state->sect, state->entry_idx,  in ice_pkg_enum_entry()
    325  entry = state->handler(state->sect_type, state->sect,  in ice_pkg_enum_entry()
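
ice_pkg_enum_section() and ice_pkg_enum_entry() keep an enumeration state (`state->sect`, `state->sect_type`, `state->entry_idx`) and ask a handler callback for one entry at a time, while ice_fill_tbl() above consumes the section stream with a `while (sect)` loop. The sketch below only reproduces that callback-driven shape; the struct and handler here are invented stand-ins, not the real DDP package types.

```c
#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for the package enumeration state and handler. */
struct pkg_enum {
	unsigned int sect_type;   /* id of the section being walked         */
	void *sect;               /* current section                        */
	unsigned int entry_idx;   /* entry index within the current section */
	/* handler turns (type, section, index) into one entry, or NULL      */
	void *(*handler)(unsigned int type, void *sect, unsigned int idx);
};

struct demo_sect { int vals[4]; };

static void *demo_handler(unsigned int type, void *sect, unsigned int idx)
{
	struct demo_sect *s = sect;

	(void)type;
	return idx < 4 ? &s->vals[idx] : NULL;   /* NULL ends the section */
}

int main(void)
{
	struct demo_sect s = { { 10, 20, 30, 40 } };
	struct pkg_enum st = { .sect_type = 1, .sect = &s, .handler = demo_handler };
	void *entry;

	/* same shape as the entry loop: ask the handler until it returns NULL */
	for (st.entry_idx = 0;
	     (entry = st.handler(st.sect_type, st.sect, st.entry_idx)) != NULL;
	     st.entry_idx++)
		printf("entry %u = %d\n", st.entry_idx, *(int *)entry);
	return 0;
}
```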
|
/drivers/block/
floppy.c
    2191 here[count].sect = 0;  in setup_format_params()
    2196 here[n].sect = count;  in setup_format_params()
    2202 while (here[n].sect)  in setup_format_params()
    2299 if (block > _floppy->sect)  in request_done()
    2583 fsector_t < _floppy->sect)  in make_raw_rw_request()
    2584 max_sector = _floppy->sect;  in make_raw_rw_request()
    2618 tracksize = _floppy->sect - _floppy->sect % ssize;  in make_raw_rw_request()
    2636 max_sector = _floppy->sect;  in make_raw_rw_request()
    2639 max_sector = _floppy->sect;  in make_raw_rw_request()
    3233 if ((int)g->sect <= 0 ||  in set_geometry()
    [all …]
|
amiflop.c
    829  unsigned char sect;  member
    863  hdr.magic, hdr.track, hdr.sect, hdr.ord,  in amiga_read()
    880  raw = decode ((ulong *)(unit[drive].trackbuf + hdr.sect*512),  in amiga_read()
    882  csum = checksum((ulong *)(unit[drive].trackbuf + hdr.sect*512), 512);  in amiga_read()
    886  hdr.magic, hdr.track, hdr.sect, hdr.ord, scnt,  in amiga_read()
    889  ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[0],  in amiga_read()
    890  ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[1],  in amiga_read()
    891  ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[2],  in amiga_read()
    892  ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[3]);  in amiga_read()
    944  hdr.sect = cnt;  in putsec()
    [all …]
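
amiga_read() places each decoded sector at `trackbuf + hdr.sect * 512` and then checksums those 512 bytes as longwords. Here is a standalone sketch of that placement plus a longword checksum; the XOR-of-32-bit-words checksum is an assumption about what the driver's checksum() does, grounded only in the `(ulong *)` cast and the 512-byte length visible above.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512
#define SECTORS_PER_TRACK 11   /* Amiga DD floppies store 11 sectors/track */

/* Assumed checksum: XOR of the sector's 32-bit words. */
static uint32_t sector_checksum(const void *data, size_t len)
{
	const uint32_t *p = data;
	uint32_t csum = 0;

	for (size_t i = 0; i < len / sizeof(*p); i++)
		csum ^= p[i];
	return csum;
}

int main(void)
{
	static uint32_t track_words[SECTORS_PER_TRACK * SECTOR_SIZE / sizeof(uint32_t)];
	unsigned char *trackbuf = (unsigned char *)track_words;
	unsigned int sect = 3;                 /* header says "I am sector 3" */
	unsigned char payload[SECTOR_SIZE];

	memset(payload, 0xA5, sizeof(payload));

	/* decoded data lands in its slot, indexed by the sector header */
	memcpy(trackbuf + sect * SECTOR_SIZE, payload, SECTOR_SIZE);

	printf("csum(sector %u) = 0x%08x\n", sect,
	       sector_checksum(trackbuf + sect * SECTOR_SIZE, SECTOR_SIZE));
	return 0;
}
```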
|
ataflop.c
    747  int sect, nsect;  in do_format()  local
    790  for( sect = 0; sect < nsect; ++sect ) {  in do_format()
    796  *p++ = (nsect + sect - desc->sect_offset) % nsect + 1;  in do_format()
    1620 getprm.sect = dtp->spt;  in fd_locked_ioctl()
    1655 drive, setprm.size, setprm.sect, setprm.stretch);  in fd_locked_ioctl()
    1682 && dtp->spt == setprm.sect  in fd_locked_ioctl()
    1715 dtp->spt = setprm.sect;  in fd_locked_ioctl()
    1716 if (setprm.sect > 14)  in fd_locked_ioctl()
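
In do_format(), line 796 fills the sector-ID table for one track: `(nsect + sect - sect_offset) % nsect + 1` yields the numbers 1..nsect rotated by sect_offset, so the track's numbering starts part-way around. A quick sketch that just prints that rotation; the surrounding format descriptor and buffer handling are omitted, and the sample values are made up.

```c
#include <stdio.h>

int main(void)
{
	int nsect = 9;          /* sectors per track                       */
	int sect_offset = 3;    /* skew: where sector 1 lands on the track */

	/* same expression as the ataflop do_format() hit at line 796 */
	for (int sect = 0; sect < nsect; sect++)
		printf("%d ", (nsect + sect - sect_offset) % nsect + 1);
	printf("\n");           /* prints: 7 8 9 1 2 3 4 5 6 */
	return 0;
}
```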
|
swim.c
    603  fs->secpercyl = g->head * g->sect;  in setup_medium()
    604  fs->secpertrack = g->sect;  in setup_medium()
    725  geo->sectors = g->sect;  in floppy_getgeo()
|
/drivers/net/ethernet/alacritech/
slicoss.c
    1120 u32 sect;  in slic_load_firmware()  local
    1173 for (sect = 0; sect < numsects; sect++) {  in slic_load_firmware()
    1174 unsigned int ssize = sectsize[sect] >> 3;  in slic_load_firmware()
    1176 base = sectstart[sect];  in slic_load_firmware()
    1192 for (sect = 0; sect < numsects; sect++) {  in slic_load_firmware()
    1193 unsigned int ssize = sectsize[sect] >> 3;  in slic_load_firmware()
    1196 base = sectstart[sect];  in slic_load_firmware()
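
slic_load_firmware() walks the firmware image one section at a time (twice, judging by the two identical loops): sectstart[] gives each section's load address and `sectsize[sect] >> 3` converts the byte size into 8-byte instruction words. A hedged sketch of that per-section loop; the section table values are made up and the actual MMIO download/verify writes are replaced by a stub.

```c
#include <stdint.h>
#include <stdio.h>

#define NUMSECTS 3

/* Pretend these came out of the firmware header (values are made up). */
static const uint32_t sectstart[NUMSECTS] = { 0x0000, 0x4000, 0x8000 };
static const uint32_t sectsize[NUMSECTS]  = { 64, 128, 32 };   /* bytes */

static void write_instruction(uint32_t addr, unsigned int idx)
{
	/* stand-in for the register writes that push one 8-byte word */
	(void)addr; (void)idx;
}

int main(void)
{
	for (unsigned int sect = 0; sect < NUMSECTS; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;   /* 8-byte words */
		uint32_t base = sectstart[sect];

		for (unsigned int i = 0; i < ssize; i++)
			write_instruction(base + i * 8, i);
		printf("section %u: %u words at 0x%04x\n", sect, ssize, base);
	}
	return 0;
}
```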
|
/drivers/md/
raid1.c
    263  sector_t sect = r1_bio->sector;  in put_buf()  local
    274  lower_barrier(conf, sect);  in put_buf()
    2142 sector_t sect = r1_bio->sector;  in fix_sync_read_error()  local
    2208 sect += s;  in fix_sync_read_error()
    2222 if (r1_sync_page_io(rdev, sect, s,  in fix_sync_read_error()
    2237 if (r1_sync_page_io(rdev, sect, s,  in fix_sync_read_error()
    2243 sect += s;  in fix_sync_read_error()
    2402 sector_t sect = r1_bio->sector;  in fix_read_error()  local
    2460 r1_sync_page_io(rdev, sect, s,  in fix_read_error()
    2479 (unsigned long long)(sect +  in fix_read_error()
    [all …]
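
fix_sync_read_error() and fix_read_error() both start from `sect = r1_bio->sector` and walk the affected range in chunks: issue r1_sync_page_io() for s sectors, then `sect += s` and repeat until the range is covered (the raid10.c hits below follow the same shape). A bare-bones sketch of that chunked walk with a stubbed per-chunk I/O helper; the real retry and failover policy is not reproduced.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Stand-in for r1_sync_page_io(): read/rewrite s sectors at sect. */
static bool sync_page_io(sector_t sect, int s)
{
	printf("repair %d sectors at %llu\n", s, (unsigned long long)sect);
	return true;
}

int main(void)
{
	sector_t sect = 1000;       /* r1_bio->sector                 */
	int sectors = 24;           /* length of the region to repair */
	const int max_chunk = 8;    /* e.g. limited by one page       */

	while (sectors) {
		int s = sectors < max_chunk ? sectors : max_chunk;

		if (!sync_page_io(sect, s))
			break;              /* real code marks the disk Faulty */
		sectors -= s;
		sect += s;                  /* advance, as in the hits above   */
	}
	return 0;
}
```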
|
raid10.c
    2488 sector_t sect = 0;  in fix_recovery_read_error()  local
    2554 sect += s;  in fix_recovery_read_error()
    2669 sect,  in fix_read_error()
    2693 + sect,  in fix_read_error()
    2718 sect,  in fix_read_error()
    2725 sect +  in fix_read_error()
    2750 sect,  in fix_read_error()
    2757 sect +  in fix_read_error()
    2768 sect +  in fix_read_error()
    2778 sect += s;  in fix_read_error()
    [all …]
|
raid5-cache.c
    196  sector_t sect)  in r5c_tree_index()  argument
    198  sector_div(sect, conf->chunk_sectors);  in r5c_tree_index()
    199  return sect;  in r5c_tree_index()
    859  static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)  in r5l_append_flush_payload()  argument
    889  payload->flush_stripes[0] = cpu_to_le64(sect);  in r5l_append_flush_payload()
    1929 r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)  in r5c_recovery_lookup_stripe()  argument
    1934 if (sh->sector == sect)  in r5c_recovery_lookup_stripe()
    2927 bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)  in r5c_big_stripe_cached()  argument
    2936 tree_index = r5c_tree_index(conf, sect);  in r5c_big_stripe_cached()
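
r5c_tree_index() keys the big-stripe radix tree by chunk: sector_div() divides sect by conf->chunk_sectors in place (the remainder is discarded), so every sector of one chunk maps to the same index, which is what r5c_big_stripe_cached() then looks up. A tiny equivalent in plain C with an example chunk size.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* sector_div(sect, n) divides in place and returns the remainder;
 * only the quotient matters here, as in r5c_tree_index(). */
static sector_t tree_index(sector_t sect, uint32_t chunk_sectors)
{
	return sect / chunk_sectors;
}

int main(void)
{
	uint32_t chunk_sectors = 1024;               /* 512 KiB chunks */

	/* every sector inside one chunk hashes to the same slot */
	printf("%llu %llu %llu\n",
	       (unsigned long long)tree_index(0, chunk_sectors),
	       (unsigned long long)tree_index(1023, chunk_sectors),
	       (unsigned long long)tree_index(1024, chunk_sectors));
	return 0;                                    /* prints: 0 0 1 */
}
```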
|
raid5-log.h
    31   bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
|
/drivers/gpu/drm/kmb/
kmb_dsi.c
    711  u8 frame_id, sect;  in mipi_tx_init_cntrl()  local
    735  for (sect = 0; sect < MIPI_CTRL_VIRTUAL_CHANNELS; sect++) {  in mipi_tx_init_cntrl()
    736  if (!frame->sections[sect])  in mipi_tx_init_cntrl()
    739  ret = mipi_tx_fg_section_cfg(kmb_dsi, frame_id, sect,  in mipi_tx_init_cntrl()
    740  frame->sections[sect],  in mipi_tx_init_cntrl()
|
/drivers/mtd/chips/
cfi_cmdset_0002.c
    2802 struct ppb_lock *sect;  in cfi_ppb_unlock()  local
    2822 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);  in cfi_ppb_unlock()
    2823 if (!sect)  in cfi_ppb_unlock()
    2846 sect[sectors].chip = &cfi->chips[chipnum];  in cfi_ppb_unlock()
    2847 sect[sectors].adr = adr;  in cfi_ppb_unlock()
    2848 sect[sectors].locked = do_ppb_xxlock(  in cfi_ppb_unlock()
    2874 kfree(sect);  in cfi_ppb_unlock()
    2883 kfree(sect);  in cfi_ppb_unlock()
    2892 if (sect[i].locked)  in cfi_ppb_unlock()
    2893 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,  in cfi_ppb_unlock()
    [all …]
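
cfi_ppb_unlock() allocates one ppb_lock record per sector, notes each sector's chip, address and current lock state, and after the unlock walks the array again re-locking every sector whose `locked` flag was set. The save/clear/restore dance is needed because, on these parts, the persistent protection bits can only be erased all at once; whether that applies to a given chip is a datasheet detail. A hedged standalone sketch of the same pattern, with the flash operations stubbed out.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ppb_lock {            /* one record per flash sector */
	unsigned long adr;   /* sector offset                 */
	bool locked;         /* lock state before the wipe    */
};

static bool read_lock_state(unsigned long adr) { return adr % 3 == 0; } /* demo */
static void relock(unsigned long adr)
{
	printf("re-lock sector @0x%lx\n", adr);
}

int main(void)
{
	const unsigned long sector_size = 0x10000;
	const int nsectors = 6;
	struct ppb_lock *sect = calloc(nsectors, sizeof(*sect));

	if (!sect)
		return 1;

	/* 1) record every sector's current lock state */
	for (int i = 0; i < nsectors; i++) {
		sect[i].adr = i * sector_size;
		sect[i].locked = read_lock_state(sect[i].adr);
	}

	/* 2) the wipe clears all protection bits at once (stubbed here) */
	printf("clear all PPB bits\n");

	/* 3) restore the bits that were set before the wipe */
	for (int i = 0; i < nsectors; i++)
		if (sect[i].locked)
			relock(sect[i].adr);

	free(sect);
	return 0;
}
```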
|
/drivers/ata/
libata-scsi.c
    1465 u32 sect, head, cyl, track;  in ata_scsi_verify_xlat()  local
    1474 sect = (u32)block % dev->sectors + 1;  in ata_scsi_verify_xlat()
    1480 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))  in ata_scsi_verify_xlat()
    1485 tf->lbal = sect;  in ata_scsi_verify_xlat()
    3632 u16 sect, fp = (u16)-1;  in ata_scsi_zbc_in_xlat()  local
    3663 sect = n_block / 512;  in ata_scsi_zbc_in_xlat()
    3672 tf->feature = sect & 0xff;  in ata_scsi_zbc_in_xlat()
    3673 tf->hob_feature = (sect >> 8) & 0xff;  in ata_scsi_zbc_in_xlat()
    3680 tf->hob_nsect = (sect >> 8) & 0xff;  in ata_scsi_zbc_in_xlat()
    3681 tf->nsect = sect & 0xff;  in ata_scsi_zbc_in_xlat()
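
ata_scsi_verify_xlat() converts an LBA to CHS for non-LBA taskfiles: `sect = block % dev->sectors + 1` gives the 1-based sector, and the check at line 1480 rejects values that do not fit the taskfile fields (16-bit cylinder, 4-bit head, 8-bit sector). The head/cylinder lines are elided in the listing, so the sketch fills them in with the standard track = block / sectors, head = track % heads, cyl = track / heads split, which is an assumption rather than a quote of those lines.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standard LBA -> CHS split; only "sect = block % sectors + 1" and the
 * range check are visible above, the rest is assumed. */
static bool lba_to_chs(uint64_t block, uint32_t heads, uint32_t sectors,
		       uint32_t *cyl, uint32_t *head, uint32_t *sect)
{
	uint32_t track;

	*sect = (uint32_t)(block % sectors) + 1;   /* sectors are 1-based */
	track = (uint32_t)(block / sectors);
	*head = track % heads;
	*cyl  = track / heads;

	/* same fit check as line 1480: 16-bit cyl, 4-bit head, 8-bit sect */
	if ((*cyl >> 16) || (*head >> 4) || (*sect >> 8) || !*sect)
		return false;
	return true;
}

int main(void)
{
	uint32_t cyl, head, sect;

	if (lba_to_chs(123456, 16, 63, &cyl, &head, &sect))
		printf("C/H/S = %u/%u/%u\n", cyl, head, sect);  /* 122/7/40 */
	return 0;
}
```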
|
/drivers/gpu/drm/amd/amdgpu/
gfx_v6_0.c
    2006 const struct cs_section_def *sect = NULL;  in gfx_v6_0_cp_gfx_start()  local
    2041 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {  in gfx_v6_0_cp_gfx_start()
    2042 for (ext = sect->section; ext->extent != NULL; ++ext) {  in gfx_v6_0_cp_gfx_start()
    2043 if (sect->id == SECT_CONTEXT) {  in gfx_v6_0_cp_gfx_start()
    2829 const struct cs_section_def *sect = NULL;  in gfx_v6_0_get_csb_size()  local
    2840 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {  in gfx_v6_0_get_csb_size()
    2841 for (ext = sect->section; ext->extent != NULL; ++ext) {  in gfx_v6_0_get_csb_size()
    2842 if (sect->id == SECT_CONTEXT)  in gfx_v6_0_get_csb_size()
|
gfx_v7_0.c
    2463 const struct cs_section_def *sect = NULL;  in gfx_v7_0_cp_gfx_start()  local
    2494 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {  in gfx_v7_0_cp_gfx_start()
    2495 for (ext = sect->section; ext->extent != NULL; ++ext) {  in gfx_v7_0_cp_gfx_start()
    2496 if (sect->id == SECT_CONTEXT) {  in gfx_v7_0_cp_gfx_start()
    3856 const struct cs_section_def *sect = NULL;  in gfx_v7_0_get_csb_size()  local
    3867 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {  in gfx_v7_0_get_csb_size()
    3868 for (ext = sect->section; ext->extent != NULL; ++ext) {  in gfx_v7_0_get_csb_size()
    3869 if (sect->id == SECT_CONTEXT)  in gfx_v7_0_get_csb_size()
|
amdgpu_gfx.c
    2308 const struct cs_section_def *sect = NULL;  in amdgpu_gfx_csb_data_parser()  local
    2312 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {  in amdgpu_gfx_csb_data_parser()
    2313 for (ext = sect->section; ext->extent != NULL; ++ext) {  in amdgpu_gfx_csb_data_parser()
    2314 if (sect->id == SECT_CONTEXT) {  in amdgpu_gfx_csb_data_parser()
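
The gfx_v6_0, gfx_v7_0 and amdgpu_gfx hits, like the radeon si.c entry further down, all walk the same data structure: rlc.cs_data is a NULL-terminated array of sections, each section holds a NULL-terminated array of register extents, and only SECT_CONTEXT sections are emitted. Below is a compact sketch of that double sentinel walk used to size a buffer; the struct layouts are trimmed stand-ins and the two-dword per-extent overhead is an assumption for the example, not the exact clear-state packet layout.

```c
#include <stddef.h>
#include <stdio.h>

enum { SECT_CONTEXT, SECT_CLEAR, SECT_CTRLCONST };   /* demo section ids */

struct cs_extent_def {
	const unsigned int *extent;   /* register values, NULL ends the list */
	unsigned int reg_count;
};

struct cs_section_def {
	const struct cs_extent_def *section;  /* NULL ends the section list */
	int id;
};

static const unsigned int regs_a[] = { 1, 2, 3 };
static const unsigned int regs_b[] = { 4, 5 };

static const struct cs_extent_def ctx_exts[] = {
	{ regs_a, 3 }, { regs_b, 2 }, { NULL, 0 },
};
static const struct cs_section_def cs_data[] = {
	{ ctx_exts, SECT_CONTEXT }, { NULL, 0 },
};

int main(void)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	unsigned int count = 0;

	/* same double sentinel walk as the get_csb_size() hits above */
	for (sect = cs_data; sect->section != NULL; ++sect)
		for (ext = sect->section; ext->extent != NULL; ++ext)
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;  /* assumed header cost */

	printf("csb dwords: %u\n", count);   /* 2+3 + 2+2 = 9 */
	return 0;
}
```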
|
/drivers/mtd/
ftl.c
    945  u_long sect;  in ftl_getgeo()  local
    948  sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;  in ftl_getgeo()
    952  geo->cylinders = sect >> 3;  in ftl_getgeo()
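
ftl_getgeo() fakes a CHS geometry for the translation layer: the sector count comes from the formatted size divided by the sector size, and `geo->cylinders = sect >> 3` only adds up if heads times sectors-per-track is fixed at 8. That split is not visible in the listing, so the 1-head/8-sector choice in the sketch below is an explicit assumption.

```c
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512

struct hd_geometry_demo {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
};

int main(void)
{
	uint32_t formatted_size = 16 * 1024 * 1024;   /* 16 MiB partition */
	unsigned long sect = formatted_size / SECTOR_SIZE;
	struct hd_geometry_demo geo;

	/* assumed split: heads * sectors == 8, so cylinders = sect >> 3 */
	geo.heads = 1;
	geo.sectors = 8;
	geo.cylinders = sect >> 3;

	printf("C/H/S = %u/%u/%u (%lu sectors)\n",
	       geo.cylinders, geo.heads, geo.sectors, sect);
	return 0;
}
```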
|
/drivers/block/null_blk/
zoned.c
    19   static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)  in null_zone_no()  argument
    21   return sect >> ilog2(dev->zone_size_sects);  in null_zone_no()
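
null_zone_no() maps a sector to its zone index with a plain shift, which only works because the emulated zone size (dev->zone_size_sects) is a power of two; ilog2() turns that size into the shift amount. A userspace equivalent with an explicit log2 helper and an example zone size.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* ilog2 for a power of two: position of the single set bit */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Same computation as null_zone_no(): sect >> ilog2(zone_size_sects). */
static unsigned int zone_no(sector_t sect, sector_t zone_size_sects)
{
	return (unsigned int)(sect >> ilog2_u64(zone_size_sects));
}

int main(void)
{
	sector_t zone_size_sects = 1ULL << 19;  /* 256 MiB zones in 512 B sectors */

	printf("%u %u %u\n",
	       zone_no(0, zone_size_sects),
	       zone_no(zone_size_sects - 1, zone_size_sects),
	       zone_no(zone_size_sects, zone_size_sects));  /* prints: 0 0 1 */
	return 0;
}
```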
|
/drivers/gpu/drm/radeon/
si.c
    5666 const struct cs_section_def *sect = NULL;  in si_get_csb_size()  local
    5677 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {  in si_get_csb_size()
    5678 for (ext = sect->section; ext->extent != NULL; ++ext) {  in si_get_csb_size()
    5679 if (sect->id == SECT_CONTEXT)  in si_get_csb_size()
    5698 const struct cs_section_def *sect = NULL;  in si_get_csb_buffer()  local
    5713 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {  in si_get_csb_buffer()
    5714 for (ext = sect->section; ext->extent != NULL; ++ext) {  in si_get_csb_buffer()
    5715 if (sect->id == SECT_CONTEXT) {  in si_get_csb_buffer()
|