
Searched refs:sectors (Results 1 – 25 of 128) sorted by relevance

/drivers/target/
target_core_sbc.c
668 u32 sectors, bool is_write) in sbc_check_prot() argument
773 u32 sectors = 0; in sbc_parse_cdb() local
883 if (!sectors) { in sbc_parse_cdb()
911 sectors = cdb[13]; in sbc_parse_cdb()
915 if (sectors > 1) { in sbc_parse_cdb()
917 " than 1\n", sectors); in sbc_parse_cdb()
929 cmd->t_task_nolb = sectors; in sbc_parse_cdb()
983 if (!sectors) { in sbc_parse_cdb()
997 if (!sectors) { in sbc_parse_cdb()
1282 left = sectors * dev->prot_length; in sbc_dif_copy_prot()
[all …]
/drivers/md/
raid0.c
66 sector_t curr_zone_end, sectors; in create_strip_zones() local
83 sectors = rdev1->sectors; in create_strip_zones()
85 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
103 if (rdev2->sectors == rdev1->sectors) { in create_strip_zones()
194 if (!smallest || (rdev1->sectors < smallest->sectors)) in create_strip_zones()
235 if (!smallest || rdev->sectors < smallest->sectors) { in create_strip_zones()
244 sectors = (smallest->sectors - zone->dev_start) * c; in create_strip_zones()
249 curr_zone_end += sectors; in create_strip_zones()
358 WARN_ONCE(sectors || raid_disks, in raid0_size()
595 unsigned sectors; in raid0_make_request() local
[all …]
md-linear.c
55 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
61 WARN_ONCE(sectors || raid_disks, in linear_size()
115 sector_t sectors; in linear_conf() local
125 sectors = rdev->sectors; in linear_conf()
126 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
127 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
130 conf->array_sectors += rdev->sectors; in linear_conf()
142 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf()
147 conf->disks[i].rdev->sectors; in linear_conf()
243 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; in linear_make_request()
[all …]
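Note: both the raid0.c and md-linear.c hits above round a member device's size down to a whole number of chunks (sector_div() by mddev->chunk_sectors, then multiply back), so a trailing partial chunk is ignored. A minimal user-space sketch of that rounding, with purely illustrative sizes:

#include <stdint.h>
#include <stdio.h>

/* Round a device size (in 512-byte sectors) down to a whole number of
 * chunks, mirroring the divide-then-multiply pattern in the raid0.c and
 * md-linear.c hits above.  The chunk size here is a made-up value. */
static uint64_t round_to_chunks(uint64_t sectors, uint32_t chunk_sectors)
{
        return (sectors / chunk_sectors) * chunk_sectors;
}

int main(void)
{
        uint64_t rdev_sectors = 1953525168ULL;  /* ~1 TB disk, for illustration */
        uint32_t chunk_sectors = 1024;          /* 512 KiB chunks, assumed */

        printf("usable: %llu sectors\n",
               (unsigned long long)round_to_chunks(rdev_sectors, chunk_sectors));
        return 0;
}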
raid1.c
574 len = sectors; in align_to_barrier_unit_end()
2143 int sectors = r1_bio->sectors; in fix_sync_read_error() local
2159 while(sectors) { in fix_sync_read_error()
2207 sectors -= s; in fix_sync_read_error()
2242 sectors -= s; in fix_sync_read_error()
2403 int sectors = r1_bio->sectors; in fix_read_error() local
2413 while(sectors) { in fix_read_error()
2486 sectors -= s; in fix_read_error()
2510 int sectors; in narrow_write_error() local
2549 sectors, 0) in narrow_write_error()
[all …]
raid10.c
727 int sectors = r10_bio->sectors; in read_balance() local
1140 sectors); in regular_request_wait()
1377 sectors = r10_bio->sectors; in raid10_write_request()
1547 r10_bio->sectors = sectors; in __make_request()
2397 int sectors = r10_bio->sectors; in sync_request_write() local
2489 int sectors = r10_bio->sectors; in fix_recovery_read_error() local
2553 sectors -= s; in fix_recovery_read_error()
2629 int sectors = r10_bio->sectors, slot = r10_bio->read_slot; in fix_read_error() local
2801 int sectors; in narrow_write_error() local
3795 if (!sectors) in raid10_size()
[all …]
md.h
133 sector_t sectors; /* Device size (in 512bytes sectors) */ member
295 static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors, in is_badblock() argument
300 sectors, in is_badblock()
310 int sectors) in rdev_has_badblock() argument
315 return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors); in rdev_has_badblock()
318 extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
320 extern void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
765 int (*resize) (struct mddev *mddev, sector_t sectors);
766 sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
791 unsigned long *sectors);
[all …]
dm-stats.c
26 unsigned long long sectors[2]; member
602 p->sectors[idx] += len; in dm_stat_for_entry()
731 shared->tmp.sectors[READ] = 0; in __dm_stat_init_temporary_percpu_totals()
732 shared->tmp.sectors[WRITE] = 0; in __dm_stat_init_temporary_percpu_totals()
749 shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); in __dm_stat_init_temporary_percpu_totals()
750 shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); in __dm_stat_init_temporary_percpu_totals()
783 p->sectors[READ] -= shared->tmp.sectors[READ]; in __dm_stat_clear()
784 p->sectors[WRITE] -= shared->tmp.sectors[WRITE]; in __dm_stat_clear()
901 shared->tmp.sectors[READ], in dm_stats_print()
905 shared->tmp.sectors[WRITE], in dm_stats_print()
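Note: the dm-stats.c hits keep one sectors[2] counter pair per CPU and per direction and fold them into temporary totals for reporting; clearing then subtracts the reported snapshot from the per-CPU counters rather than zeroing them, so increments racing with the clear are not lost. A single-threaded sketch of the fold step, with a hypothetical fixed CPU count:

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4          /* hypothetical; the kernel iterates possible CPUs */
enum { RD, WR };

/* One sectors[2] counter pair per CPU, as in struct dm_stat_percpu above. */
static uint64_t percpu_sectors[NCPUS][2];

/* Fold every per-CPU slot into a temporary total (dm-stats uses READ_ONCE()
 * for the loads; plain loads suffice in this single-threaded sketch). */
static void fold_totals(uint64_t tmp[2])
{
        tmp[RD] = tmp[WR] = 0;
        for (int cpu = 0; cpu < NCPUS; cpu++) {
                tmp[RD] += percpu_sectors[cpu][RD];
                tmp[WR] += percpu_sectors[cpu][WR];
        }
}

int main(void)
{
        uint64_t tmp[2];

        percpu_sectors[0][RD] = 128;
        percpu_sectors[2][RD] = 64;
        percpu_sectors[3][WR] = 256;

        fold_totals(tmp);
        printf("read: %llu sectors, write: %llu sectors\n",
               (unsigned long long)tmp[RD], (unsigned long long)tmp[WR]);
        return 0;
}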
md.c
1696 sector_t sectors; in super_1_load() local
3380 *sectors = new; in strict_blocks_to_sectors()
3389 sector_t sectors; in rdev_size_store() local
3401 if (!sectors) in rdev_size_store()
3413 rdev->sectors = sectors; in rdev_size_store()
4728 sector_t sectors; in size_store() local
5540 sector_t sectors; in array_size_store() local
8484 sectors = 0; in md_seq_show()
8501 sectors += rdev->sectors; in md_seq_show()
8649 sectors) - in is_rdev_holder_idle()
[all …]
/drivers/scsi/
scsicam.c
179 unsigned long heads, sectors, cylinders, temp; in setsize() local
182 sectors = 62L; /* Maximize sectors per track */ in setsize()
184 temp = cylinders * sectors; /* Compute divisor for heads */ in setsize()
189 sectors = capacity / temp; /* Compute value for sectors per in setsize()
192 sectors++; /* Else, increment number of sectors */ in setsize()
193 temp = heads * sectors; /* Compute divisor for cylinders */ in setsize()
201 *secs = (unsigned int) sectors; in setsize()
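Note: the setsize() hits show the classic capacity-to-CHS heuristic: assume 62 sectors per track, derive heads, then re-derive sectors and cylinders when the divisions do not come out even. A rough user-space reconstruction from the fragments above; the starting cylinder count and the exact round-up steps are assumptions, and the in-kernel function applies further limits:

#include <stdio.h>

static void chs_from_capacity(unsigned long capacity,
                              unsigned int *cyls, unsigned int *hds,
                              unsigned int *secs)
{
        unsigned long heads, sectors, cylinders, temp;

        cylinders = 1024L;                /* assumed starting point */
        sectors = 62L;                    /* maximize sectors per track */

        temp = cylinders * sectors;       /* divisor for heads */
        heads = capacity / temp;
        if (capacity % temp) {
                heads++;                  /* round heads up */
                temp = cylinders * heads; /* divisor for sectors */
                sectors = capacity / temp;
                if (capacity % temp)
                        sectors++;        /* round sectors up */
        }
        temp = heads * sectors;           /* divisor for cylinders */
        cylinders = capacity / temp;

        *cyls = (unsigned int)cylinders;
        *hds = (unsigned int)heads;
        *secs = (unsigned int)sectors;
}

int main(void)
{
        unsigned int c, h, s;

        chs_from_capacity(2097152UL, &c, &h, &s);  /* 1 GiB of 512-byte sectors */
        printf("C/H/S = %u/%u/%u\n", c, h, s);
        return 0;
}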
ps3rom.c
162 u32 sectors) in ps3rom_read_request() argument
167 __func__, __LINE__, sectors, start_sector); in ps3rom_read_request()
171 sectors, 0, dev->bounce_lpar, &dev->tag); in ps3rom_read_request()
183 u32 sectors) in ps3rom_write_request() argument
188 __func__, __LINE__, sectors, start_sector); in ps3rom_write_request()
194 sectors, 0, dev->bounce_lpar, &dev->tag); in ps3rom_write_request()
/drivers/mtd/
ssfdc.c
22 unsigned char sectors; member
317 ssfdc->sectors = 32; in ssfdcr_add_mtd()
318 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); in ssfdcr_add_mtd()
320 ((long)ssfdc->sectors * (long)ssfdc->heads)); in ssfdcr_add_mtd()
323 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, in ssfdcr_add_mtd()
325 (long)ssfdc->sectors); in ssfdcr_add_mtd()
328 (long)ssfdc->sectors; in ssfdcr_add_mtd()
411 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); in ssfdcr_getgeo()
414 geo->sectors = ssfdc->sectors; in ssfdcr_getgeo()
nftlcore.c
71 nftl->sectors = nftl->mbd.size / temp; in nftl_add_mtd()
73 nftl->sectors++; in nftl_add_mtd()
74 temp = nftl->cylinders * nftl->sectors; in nftl_add_mtd()
79 temp = nftl->heads * nftl->sectors; in nftl_add_mtd()
84 if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) { in nftl_add_mtd()
93 nftl->cylinders, nftl->heads , nftl->sectors, in nftl_add_mtd()
95 (long)nftl->sectors ); in nftl_add_mtd()
770 geo->sectors = nftl->sectors; in nftl_getgeo()
inftlcore.c
78 inftl->sectors = inftl->mbd.size / temp; in inftl_add_mtd()
80 inftl->sectors++; in inftl_add_mtd()
81 temp = inftl->cylinders * inftl->sectors; in inftl_add_mtd()
86 temp = inftl->heads * inftl->sectors; in inftl_add_mtd()
91 if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) { in inftl_add_mtd()
100 inftl->cylinders, inftl->heads , inftl->sectors, in inftl_add_mtd()
102 (long)inftl->sectors ); in inftl_add_mtd()
922 geo->sectors = inftl->sectors; in inftl_getgeo()
/drivers/usb/storage/
datafab.c
60 unsigned long sectors; /* total sector count */ member
138 u32 sectors) in datafab_read_data() argument
153 if (sectors > 0x0FFFFFFF) in datafab_read_data()
162 totallen = sectors * info->ssize; in datafab_read_data()
221 u32 sectors) in datafab_write_data() argument
237 if (sectors > 0x0FFFFFFF) in datafab_write_data()
246 totallen = sectors * info->ssize; in datafab_write_data()
420 info->sectors = ((u32)(reply[117]) << 24) | in datafab_id_device()
582 info->sectors, info->ssize); in datafab_transport()
586 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); in datafab_transport()
jumpshot.c
96 unsigned long sectors; /* total sector count */ member
156 u32 sectors) in jumpshot_read_data() argument
174 totallen = sectors * info->ssize; in jumpshot_read_data()
233 u32 sectors) in jumpshot_write_data() argument
251 totallen = sectors * info->ssize; in jumpshot_write_data()
354 info->sectors = ((u32)(reply[117]) << 24) | in jumpshot_id_device()
513 info->sectors, info->ssize); in jumpshot_transport()
517 ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); in jumpshot_transport()
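Note: both datafab.c and jumpshot.c answer READ CAPACITY by returning the highest addressable LBA, i.e. sectors - 1, as a big-endian 32-bit value. A minimal sketch of that encoding, assuming a 512-byte block size and a made-up sector count:

#include <stdint.h>
#include <stdio.h>

/* Fill an 8-byte READ CAPACITY(10) response: last LBA, then block length,
 * both big-endian on the wire (the role cpu_to_be32() plays above). */
static void fill_read_capacity10(uint8_t resp[8], uint32_t total_sectors)
{
        uint32_t last_lba = total_sectors - 1;
        uint32_t blk_len = 512;           /* assumed block size */

        resp[0] = last_lba >> 24; resp[1] = last_lba >> 16;
        resp[2] = last_lba >> 8;  resp[3] = last_lba;
        resp[4] = blk_len >> 24;  resp[5] = blk_len >> 16;
        resp[6] = blk_len >> 8;   resp[7] = blk_len;
}

int main(void)
{
        uint8_t resp[8];

        fill_read_capacity10(resp, 1000215216U);  /* hypothetical sector count */
        for (int i = 0; i < 8; i++)
                printf("%02x ", resp[i]);
        printf("\n");
        return 0;
}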
sddr55.c
190 unsigned short sectors) { in sddr55_read_data() argument
209 len = min_t(unsigned int, sectors, info->blocksize >> in sddr55_read_data()
217 while (sectors>0) { in sddr55_read_data()
227 pages = min_t(unsigned int, sectors << info->smallpageshift, in sddr55_read_data()
295 sectors -= pages >> info->smallpageshift; in sddr55_read_data()
309 unsigned short sectors) { in sddr55_write_data() argument
336 len = min_t(unsigned int, sectors, info->blocksize >> in sddr55_write_data()
344 while (sectors > 0) { in sddr55_write_data()
354 pages = min_t(unsigned int, sectors << info->smallpageshift, in sddr55_write_data()
503 sectors -= pages >> info->smallpageshift; in sddr55_write_data()
alauda.c
917 unsigned int sectors) in alauda_read_data() argument
938 len = min(sectors, blocksize) * (pagesize + 64); in alauda_read_data()
952 while (sectors > 0) { in alauda_read_data()
968 pages = min(sectors, blocksize - page); in alauda_read_data()
1001 sectors -= pages; in alauda_read_data()
1012 unsigned int sectors) in alauda_write_data() argument
1030 len = min(sectors, blocksize) * pagesize; in alauda_write_data()
1054 while (sectors > 0) { in alauda_write_data()
1056 unsigned int pages = min(sectors, blocksize - page); in alauda_write_data()
1078 sectors -= pages; in alauda_write_data()
/drivers/md/bcache/
alloc.c
86 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities() argument
93 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
601 unsigned int sectors, in bch_alloc_sectors() argument
649 sectors = min(sectors, b->sectors_free); in bch_alloc_sectors()
651 SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); in bch_alloc_sectors()
652 SET_KEY_SIZE(k, sectors); in bch_alloc_sectors()
663 b->sectors_free -= sectors; in bch_alloc_sectors()
666 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); in bch_alloc_sectors()
668 atomic_long_add(sectors, in bch_alloc_sectors()
request.c
117 unsigned int sectors = min(bio_sectors(bio), in bch_data_invalidate() local
123 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
124 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate()
129 sectors)); in bch_data_invalidate()
367 unsigned int sectors, congested; in check_should_bypass() local
453 sectors = max(task->sequential_io, in check_should_bypass()
457 sectors >= dc->sequential_cutoff >> 9) { in check_should_bypass()
462 if (congested && sectors >= congested) { in check_should_bypass()
550 BUG_ON(bio_sectors <= sectors); in cache_lookup_fn()
893 struct bio *bio, unsigned int sectors) in cached_dev_cache_miss() argument
[all …]
/drivers/mtd/nand/
ecc-mtk.c
66 u32 sectors; member
150 if (dec & ecc->sectors) { in mtk_ecc_irq()
156 ecc->sectors = 0; in mtk_ecc_irq()
213 if (config->sectors) in mtk_ecc_config()
214 ecc->sectors = 1 << (config->sectors - 1); in mtk_ecc_config()
221 int sectors) in mtk_ecc_get_stats() argument
229 for (i = 0; i < sectors; i++) { in mtk_ecc_get_stats()
/drivers/mtd/spi-nor/
Kconfig
14 bool "Use small 4096 B erase sectors"
17 Many flash memories support erasing small (4096 B) sectors. Depending
21 small sectors. On the other hand erasing should be faster when using
22 64 KiB block instead of 16 × 4 KiB sectors.
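Note: the erase granularity this option selects is typically what user space sees as the MTD erasesize. A minimal sketch that erases a 64 KiB region in whatever granularity the driver reports, using the standard MEMGETINFO/MEMERASE ioctls (/dev/mtd0 and the offset are placeholders):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
        int fd = open("/dev/mtd0", O_RDWR);   /* placeholder device */
        struct mtd_info_user info;
        struct erase_info_user ei;

        if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0) {
                perror("mtd");
                return 1;
        }

        /* With 4 KiB sectors this loop issues 16 erases; with 64 KiB blocks
         * it issues a single one. */
        for (ei.start = 0; ei.start < 64 * 1024; ei.start += info.erasesize) {
                ei.length = info.erasesize;
                if (ioctl(fd, MEMERASE, &ei) < 0) {
                        perror("MEMERASE");
                        return 1;
                }
        }
        close(fd);
        return 0;
}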
/drivers/ata/
libata-core.c
76 u16 heads, u16 sectors);
1172 u64 sectors = 0; in ata_tf_to_lba48() local
1181 return sectors; in ata_tf_to_lba48()
1186 u64 sectors = 0; in ata_tf_to_lba() local
1193 return sectors; in ata_tf_to_lba()
2136 tf.nsect = sectors; in ata_read_log_page()
2704 dev->sectors = id[56]; in ata_dev_config_chs()
2709 dev->sectors = id[6]; in ata_dev_config_chs()
2976 dev->sectors = 0; in ata_dev_configure()
4601 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) in ata_dev_init_params()
[all …]
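Note: ata_tf_to_lba48() in the hits above rebuilds a 48-bit LBA from the taskfile's six LBA byte registers (the "hob" high-order set carries bits 47:24). A generic sketch of that assembly; the struct and field names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Byte registers of an ATA LBA48 address; names are illustrative only. */
struct lba48_regs {
        uint8_t hob_lbah, hob_lbam, hob_lbal;  /* bits 47:40, 39:32, 31:24 */
        uint8_t lbah, lbam, lbal;              /* bits 23:16, 15:8, 7:0 */
};

static uint64_t lba48_to_u64(const struct lba48_regs *r)
{
        uint64_t lba = 0;

        lba |= (uint64_t)r->hob_lbah << 40;
        lba |= (uint64_t)r->hob_lbam << 32;
        lba |= (uint64_t)r->hob_lbal << 24;
        lba |= (uint64_t)r->lbah << 16;
        lba |= (uint64_t)r->lbam << 8;
        lba |= (uint64_t)r->lbal;
        return lba;
}

int main(void)
{
        struct lba48_regs r = { 0x00, 0x00, 0x74, 0x70, 0x6d, 0xb0 };

        printf("sectors = %llu\n", (unsigned long long)lba48_to_u64(&r));
        return 0;
}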
/drivers/mmc/core/
mmc_test.c
100 unsigned int sectors; member
547 tr->sectors = sectors; in mmc_test_save_transfer_result()
571 mmc_hostname(test->card->host), sectors, sectors >> 1, in mmc_test_print_rate()
599 sectors >> 1, (sectors & 1 ? ".5" : ""), in mmc_test_print_avg_rate()
613 return card->ext_csd.sectors; in mmc_test_capacity()
954 int sectors; in mmc_test_transfer() local
962 sectors++; in mmc_test_transfer()
964 if ((sectors * 512) > BUFFER_SIZE) in mmc_test_transfer()
969 for (i = 0; i < sectors; i++) { in mmc_test_transfer()
982 for (; i < sectors * 512; i++) { in mmc_test_transfer()
[all …]
/drivers/ps3/
ps3stor_lib.c
265 u64 start_sector, u64 sectors, int write) in ps3stor_read_write_sectors() argument
272 __func__, __LINE__, op, sectors, start_sector); in ps3stor_read_write_sectors()
276 start_sector, sectors, 0, lpar, in ps3stor_read_write_sectors()
279 start_sector, sectors, 0, lpar, in ps3stor_read_write_sectors()
/drivers/mtd/nand/raw/
mtk_nand.c
729 u32 sectors) in mtk_nfc_read_fdm() argument
738 for (i = 0; i < sectors; i++) { in mtk_nfc_read_fdm()
915 u32 sectors) in mtk_nfc_update_ecc_stats() argument
926 memset(buf, 0xff, sectors * chip->ecc.size); in mtk_nfc_update_ecc_stats()
927 for (i = 0; i < sectors; i++) in mtk_nfc_update_ecc_stats()
946 u32 column, sectors, start, end, reg; in mtk_nfc_read_subpage() local
957 sectors = end - start; in mtk_nfc_read_subpage()
960 len = sectors * chip->ecc.size + (raw ? sectors * spare : 0); in mtk_nfc_read_subpage()
980 nfc->ecc_cfg.sectors = sectors; in mtk_nfc_read_subpage()
1011 ADDRCNTR_SEC(reg) >= sectors, 10, in mtk_nfc_read_subpage()
[all …]

Completed in 99 milliseconds
