/linux/drivers/md/

raid0.c
      84  sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
     134  mddev->chunk_sectors << 9, blksize);  in create_strip_zones()
     385  lim.io_min = mddev->chunk_sectors << 9;  in raid0_set_limits()
     400  if (mddev->chunk_sectors == 0) {  in raid0_run()
     501  mddev->chunk_sectors;  in raid0_handle_discard()
     503  mddev->chunk_sectors;  in raid0_handle_discard()
     506  mddev->chunk_sectors) +  in raid0_handle_discard()
     509  mddev->chunk_sectors) +  in raid0_handle_discard()
     522  mddev->chunk_sectors;  in raid0_handle_discard()
     601  chunk_sects = mddev->chunk_sectors;  in raid0_make_request()
    [all …]
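The raid0.c hits above are the striping arithmetic: create_strip_zones() divides capacity by chunk_sectors, raid0_make_request() splits bios that cross a chunk boundary, and raid0_handle_discard() walks chunk-aligned ranges. As an illustration only, here is a minimal userspace sketch of how an array sector maps to a member device and device-relative sector for a uniform, single-zone RAID0; the chunk size and device count are made-up example values, and the real driver additionally handles members of different sizes.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: map an array sector to (member device, device sector)
     * for a simple single-zone RAID0 layout with a fixed chunk_sectors and
     * nr_devices. This is the idea behind the striping code, not the kernel
     * implementation itself. */
    static void map_sector(uint64_t sector, uint32_t chunk_sectors,
                           uint32_t nr_devices, uint32_t *dev, uint64_t *dev_sector)
    {
            uint64_t chunk = sector / chunk_sectors;   /* which chunk overall */
            uint64_t offset = sector % chunk_sectors;  /* offset inside the chunk */

            *dev = chunk % nr_devices;                 /* round-robin striping */
            *dev_sector = (chunk / nr_devices) * chunk_sectors + offset;
    }

    int main(void)
    {
            uint32_t dev;
            uint64_t dev_sector;

            /* Example values: 512 KiB chunks (1024 sectors of 512 B), 4 members. */
            map_sector(3000, 1024, 4, &dev, &dev_sector);
            printf("array sector 3000 -> device %u, sector %llu\n",
                   dev, (unsigned long long)dev_sector);
            return 0;
    }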
|
dm-zoned-target.c
     994  unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);  in dmz_io_hints()  (local)
    1004  limits->max_hw_discard_sectors = chunk_sectors;  in dmz_io_hints()
    1005  limits->max_write_zeroes_sectors = chunk_sectors;  in dmz_io_hints()
    1008  limits->chunk_sectors = chunk_sectors;  in dmz_io_hints()
    1009  limits->max_sectors = chunk_sectors;  in dmz_io_hints()
|
raid5.c
    2970  : conf->chunk_sectors;  in raid5_compute_sector()
    3166  : conf->chunk_sectors;  in raid5_compute_blocknr()
    5370  unsigned int chunk_sectors;  in in_chunk_boundary()  (local)
    5373  chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);  in in_chunk_boundary()
    5374  return chunk_sectors >=  in in_chunk_boundary()
    7316  max(conf->chunk_sectors,  in alloc_scratch_buffer()
    7837  int chunk_sectors;  in raid5_run()  (local)
    7860  chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);  in raid5_run()
    7891  here_old * chunk_sectors)  in raid5_run()
    8404  mddev->chunk_sectors)  in check_reshape()
    [all …]
|
raid5-ppl.c
     329  (data_sector >> ilog2(conf->chunk_sectors) ==  in ppl_log_stripe()
     330  data_sector_last >> ilog2(conf->chunk_sectors)) &&  in ppl_log_stripe()
     812  if ((pp_size >> 9) < conf->chunk_sectors) {  in ppl_recover_entry()
     821  (data_disks - 1) * conf->chunk_sectors +  in ppl_recover_entry()
     825  strip_sectors = conf->chunk_sectors;  in ppl_recover_entry()
     859  (disk * conf->chunk_sectors);  in ppl_recover_entry()
|
dm-raid.c
     718  mddev->new_chunk_sectors = mddev->chunk_sectors;  in rs_set_cur()
     731  mddev->chunk_sectors = mddev->new_chunk_sectors;  in rs_set_new()
     985  if (region_size < rs->md.chunk_sectors) {  in validate_region_size()
    1166  rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;  in parse_raid_params()
    1496  if (rs->md.chunk_sectors)  in parse_raid_params()
    1497  max_io_len = rs->md.chunk_sectors;  in parse_raid_params()
    1912  rs->md.new_chunk_sectors != rs->md.chunk_sectors;  in rs_is_layout_change()
    2154  sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);  in super_sync()
    2265  mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);  in super_init_validation()
    2320  if (mddev->chunk_sectors != mddev->new_chunk_sectors)  in super_init_validation()
    [all …]
|
dm-unstripe.c
     180  limits->chunk_sectors = uc->chunk_size;  in unstripe_io_hints()
|
raid5.h
     579  int chunk_sectors;  (member)
|
md.c
    1323  mddev->chunk_sectors = sb->chunk_size >> 9;  in super_90_validate()
    1352  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_90_validate()
    1499  sb->chunk_size = mddev->chunk_sectors << 9;  in super_90_sync()
    3990  mddev->chunk_sectors = mddev->new_chunk_sectors;  in level_store()
    4221  mddev->chunk_sectors << 9);  in chunk_size_show()
    4252  mddev->chunk_sectors = n >> 9;  in chunk_size_store()
    5286  chunk = mddev->chunk_sectors;  in max_sync_store()
    5401  mddev->new_chunk_sectors = mddev->chunk_sectors;  in reshape_position_store()
    6377  mddev->chunk_sectors = 0;  in md_clean()
    6784  info.chunk_size = mddev->chunk_sectors << 9;  in get_array_info()
    [all …]
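Many of the md.c hits are pure unit conversion: the v0.90 superblock and the legacy array-info ioctl carry the chunk size in bytes, while mddev->chunk_sectors keeps it in 512-byte sectors, hence the paired << 9 and >> 9 shifts. A minimal sketch of that conversion, using an example chunk size rather than anything read from a real array:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9  /* 512-byte sectors, as used throughout the block layer */

    int main(void)
    {
            /* Example value only: a 64 KiB chunk as a superblock would store it. */
            uint32_t chunk_size_bytes = 64 * 1024;
            uint32_t chunk_sectors = chunk_size_bytes >> SECTOR_SHIFT;

            printf("%u bytes = %u sectors = %u KiB per chunk\n",
                   chunk_sectors << SECTOR_SHIFT,  /* back to bytes */
                   chunk_sectors,
                   chunk_sectors / 2);             /* sectors -> KiB, as in raid10_status() */
            return 0;
    }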
|
dm-zone.c
     384  lim->chunk_sectors = 0;  in dm_set_zones_restrictions()
|
raid10.c
    1762  dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1764  dev_start = first_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1769  dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1771  dev_end = last_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1874  seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);  in raid10_status()
    3097  window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;  in raid10_set_cluster_sync_high()
    3818  chunk = mddev->chunk_sectors;  in setup_geo()
    3985  lim.io_min = mddev->chunk_sectors << 9;  in raid10_set_queue_limits()
    4248  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid10_takeover_raid0()
    5085  mddev->chunk_sectors = 1 << conf->geo.chunk_shift;  in raid10_finish_reshape()
|
md.h
     395  int chunk_sectors;  (member)
|
dm-table.c
    1636  zone_sectors = ti_limits.chunk_sectors;  in dm_calculate_queue_limits()
    1689  zone_sectors = limits->chunk_sectors;  in dm_calculate_queue_limits()
|
/linux/drivers/char/

ps3flash.c
      26  u64 chunk_sectors;  (member)
      38  start_sector, priv->chunk_sectors,  in ps3flash_read_write_sectors()
     118  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
     151  sector += priv->chunk_sectors;  in ps3flash_read()
     187  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
     226  sector += priv->chunk_sectors;  in ps3flash_write()
     376  priv->chunk_sectors = dev->bounce_size / dev->blk_size;  in ps3flash_probe()
|
/linux/block/

blk-settings.c
     102  lim->chunk_sectors);  in blk_validate_zoned_limits()
     198  if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))  in blk_validate_atomic_write_limits()
     576  if (b->chunk_sectors)  in blk_stack_limits()
     577  t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);  in blk_stack_limits()
     601  if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {  in blk_stack_limits()
     602  t->chunk_sectors = 0;  in blk_stack_limits()
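The blk_stack_limits() hits show the combining rule for stacked devices: a lower device's chunk_sectors is folded into the top-level limit with gcd(), and the result is zeroed if it is not aligned to the physical block size. A small standalone sketch of that rule, written from the fragments above rather than copied from the kernel:

    #include <stdio.h>

    static unsigned int gcd(unsigned int a, unsigned int b)
    {
            while (b) {
                    unsigned int t = a % b;

                    a = b;
                    b = t;
            }
            return a;
    }

    /* Illustrative only: fold a bottom device's chunk_sectors into the stacked
     * (top) limit, roughly following the blk_stack_limits() lines shown above.
     * Note gcd(0, b) == b, so an unset top limit simply adopts the bottom one. */
    static unsigned int stack_chunk_sectors(unsigned int top, unsigned int bottom,
                                            unsigned int physical_block_size)
    {
            if (bottom)
                    top = gcd(top, bottom);

            /* Drop a chunk size that is not a multiple of the physical block size. */
            if (((unsigned long long)top << 9) & (physical_block_size - 1))
                    top = 0;

            return top;
    }

    int main(void)
    {
            /* Example values: 256-sector and 384-sector chunks stack to 128 sectors. */
            printf("stacked chunk_sectors = %u\n", stack_chunk_sectors(256, 384, 4096));
            return 0;
    }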
|
blk-zoned.c
     540  zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);  in disk_get_and_lock_zone_wplug()
     745  sector += disk->queue->limits.chunk_sectors)  in blk_zone_wplug_handle_reset_all()
    1680  sector_t zone_sectors = disk->queue->limits.chunk_sectors;  in blk_revalidate_zone_cb()
    1755  sector_t zone_sectors = q->limits.chunk_sectors;  in blk_revalidate_disk_zones()
|
blk.h
     354  return lim->chunk_sectors || bio->bi_vcnt != 1 ||  in bio_may_need_split()
|
blk-sysfs.c
     109  QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)  in QUEUE_SYSFS_LIMIT_SHOW()
|
blk-core.c
     592  if (nr_sectors > q->limits.chunk_sectors)  in blk_check_zone_append()
|
/linux/include/uapi/linux/

ublk_cmd.h
     351  __u32 chunk_sectors;  (member)
|
/linux/include/linux/

blkdev.h
     360  unsigned int chunk_sectors;  (member)
     692  return sector >> ilog2(disk->queue->limits.chunk_sectors);  in disk_zone_no()
    1191  unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);  in queue_limits_max_zone_append_sectors()
    1358  return q->limits.chunk_sectors;  in bdev_zone_sectors()
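For zoned devices, chunk_sectors is the zone size, and the helpers here rely on it being a power of two in sectors: disk_zone_no() turns a sector into a zone number with a shift by ilog2(chunk_sectors), and blk-zoned.c (line 540 above) derives the write-pointer offset with a mask. A small sketch of that shift-and-mask arithmetic, under the power-of-two assumption and with an example zone size rather than a value from a real device:

    #include <stdint.h>
    #include <stdio.h>

    /* ilog2 for a power-of-two value: position of its single set bit. */
    static unsigned int ilog2_u32(uint32_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            uint32_t zone_sectors = 524288;  /* example: 256 MiB zones in 512 B sectors */
            uint64_t sector = 1300000000ULL;

            /* Zone number and offset within the zone as a shift and a mask;
             * this only works because zone_sectors is a power of two. */
            uint64_t zone_no   = sector >> ilog2_u32(zone_sectors);
            uint64_t wp_offset = sector & (zone_sectors - 1);

            printf("sector %llu -> zone %llu, offset %llu\n",
                   (unsigned long long)sector, (unsigned long long)zone_no,
                   (unsigned long long)wp_offset);
            return 0;
    }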
|
/linux/drivers/nvme/host/

zns.c
     115  lim->chunk_sectors = ns->head->zsze =  in nvme_update_zone_info()
|
/linux/drivers/block/

ublk_drv.c
     246  return p->dev_sectors >> ilog2(p->chunk_sectors);  in ublk_get_nr_zones()
     313  unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;  in ublk_report_zones()
     541  if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)  in ublk_validate_params()
    2199  .chunk_sectors = p->chunk_sectors,  in ublk_ctrl_start_dev()
|
/linux/drivers/scsi/

sd_zbc.c
     635  lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);  in sd_zbc_read_zones()
|
/linux/Documentation/ABI/stable/

sysfs-block
     208  What: /sys/block/<disk>/queue/chunk_sectors
     212  [RO] chunk_sectors has different meaning depending on the type
     213  of the disk. For a RAID device (dm-raid), chunk_sectors
     216  host-managed, chunk_sectors indicates the size in 512B sectors
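The ABI entry is the user-visible side of the same limit, exported read-only by blk-sysfs.c as queue/chunk_sectors. A minimal example of reading it from userspace; the device name sda is just a placeholder:

    #include <stdio.h>

    int main(void)
    {
            /* "sda" is a placeholder device; the attribute is read-only and
             * reports 0 when the device imposes no chunk/zone granularity. */
            FILE *f = fopen("/sys/block/sda/queue/chunk_sectors", "r");
            unsigned long chunk_sectors;

            if (!f || fscanf(f, "%lu", &chunk_sectors) != 1) {
                    perror("chunk_sectors");
                    return 1;
            }
            fclose(f);

            printf("chunk_sectors = %lu (512-byte sectors)\n", chunk_sectors);
            return 0;
    }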
|
/linux/drivers/block/null_blk/

zoned.c
     168  lim->chunk_sectors = dev->zone_size_sects;  in null_init_zoned_dev()
|