/drivers/md/
raid0.c
    134  mddev->chunk_sectors << 9, blksize);  in create_strip_zones()
    385  lim.io_min = mddev->chunk_sectors << 9;  in raid0_set_limits()
    387  lim.chunk_sectors = mddev->chunk_sectors;  in raid0_set_limits()
    400  if (mddev->chunk_sectors == 0) {  in raid0_run()
    507  mddev->chunk_sectors;  in raid0_handle_discard()
    509  mddev->chunk_sectors;  in raid0_handle_discard()
    512  mddev->chunk_sectors) +  in raid0_handle_discard()
    515  mddev->chunk_sectors) +  in raid0_handle_discard()
    528  mddev->chunk_sectors;  in raid0_handle_discard()
    607  chunk_sects = mddev->chunk_sectors;  in raid0_make_request()
    [all …]
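
The raid0.c hits above are the chunk-striping arithmetic: an array sector splits into a chunk index plus an intra-chunk offset, and chunks are dealt round-robin across members. A minimal userspace sketch of that mapping, assuming a single-zone layout and illustrative names (the real create_strip_zones()/raid0_make_request() also handle mixed-size members):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative RAID0-style chunk mapping: given an array sector and a
 * chunk size in 512-byte sectors, find the member device the sector
 * lands on and the offset within that device. Simplified; the kernel's
 * zone-aware logic in raid0.c is more involved.
 */
static void map_sector(uint64_t sector, uint32_t chunk_sectors,
		       uint32_t nr_devs)
{
	uint64_t chunk = sector / chunk_sectors;   /* global chunk index */
	uint32_t offset = sector % chunk_sectors;  /* offset inside chunk */
	uint32_t dev = chunk % nr_devs;            /* member device */
	uint64_t dev_sector = (chunk / nr_devs) * chunk_sectors + offset;

	printf("sector %llu -> dev %u, sector %llu\n",
	       (unsigned long long)sector, dev,
	       (unsigned long long)dev_sector);
}

int main(void)
{
	map_sector(1000, 128, 4);	/* 64 KiB chunks across 4 devices */
	return 0;
}
```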
|
md-linear.c
    74   lim.max_hw_sectors = mddev->chunk_sectors;  in linear_set_limits()
    75   lim.max_write_zeroes_sectors = mddev->chunk_sectors;  in linear_set_limits()
    76   lim.io_min = mddev->chunk_sectors << 9;  in linear_set_limits()
    124  if (mddev->chunk_sectors) {  in linear_conf()
    126  sector_div(sectors, mddev->chunk_sectors);  in linear_conf()
    127  rdev->sectors = sectors * mddev->chunk_sectors;  in linear_conf()
    304  seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);  in linear_status()
|
dm-zone.c
    395  lim->chunk_sectors = 0;  in dm_set_zones_restrictions()
    401  if (q->limits.chunk_sectors != lim->chunk_sectors) {  in dm_set_zones_restrictions()
|
raid5.c
    2958  : conf->chunk_sectors;  in raid5_compute_sector()
    3154  : conf->chunk_sectors;  in raid5_compute_blocknr()
    5317  unsigned int chunk_sectors;  in in_chunk_boundary() (local)
    5320  chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);  in in_chunk_boundary()
    5321  return chunk_sectors >=  in in_chunk_boundary()
    7310  max(conf->chunk_sectors,  in alloc_scratch_buffer()
    7831  int chunk_sectors;  in raid5_run() (local)
    7854  chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);  in raid5_run()
    7885  here_old * chunk_sectors)  in raid5_run()
    8398  mddev->chunk_sectors)  in check_reshape()
    [all …]
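
in_chunk_boundary() (line 5321 above) is the classic "does this bio stay inside one chunk?" test, taking the min of the old and new chunk sizes so the check also holds mid-reshape. A hedged sketch of the core test, assuming a power-of-two chunk size (names are illustrative):

```c
#include <stdint.h>

/*
 * Does [sector, sector + nr_sectors) fit inside a single chunk?
 * With a power-of-two chunk_sectors, the offset into the current chunk
 * is (sector & (chunk_sectors - 1)); the bio fits if the remainder of
 * the chunk can hold it. Simplified from raid5.c's in_chunk_boundary().
 */
static int in_chunk_boundary(uint64_t sector, uint32_t nr_sectors,
			     uint32_t chunk_sectors)
{
	return chunk_sectors >=
	       ((sector & (chunk_sectors - 1)) + nr_sectors);
}

int main(void)
{
	/* 8 sectors at offset 120 end exactly on a 128-sector boundary */
	return !in_chunk_boundary(120, 8, 128);
}
```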
|
dm-zoned-target.c
    994   unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);  in dmz_io_hints() (local)
    1004  limits->max_hw_discard_sectors = chunk_sectors;  in dmz_io_hints()
    1005  limits->max_write_zeroes_sectors = chunk_sectors;  in dmz_io_hints()
    1008  limits->chunk_sectors = chunk_sectors;  in dmz_io_hints()
    1009  limits->max_sectors = chunk_sectors;  in dmz_io_hints()
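
In dm-zoned (and in the zoned drivers further down: zns.c, sd_zbc.c, null_blk's zoned.c, virtio_blk.c, zloop.c), chunk_sectors carries the zone size, and the other limits are clamped to it so no single I/O spans a zone. A small sketch of the resulting zone arithmetic, assuming a power-of-two zone size (helper names are mine, not the kernel's):

```c
#include <stdint.h>
#include <stdio.h>

/* With chunk_sectors holding the zone size, locating a sector's zone
 * is plain arithmetic (power-of-two zone size assumed). */
static uint32_t zone_no(uint64_t sector, uint32_t zone_sectors)
{
	return (uint32_t)(sector / zone_sectors);
}

static uint64_t zone_start(uint64_t sector, uint32_t zone_sectors)
{
	return sector & ~((uint64_t)zone_sectors - 1);
}

int main(void)
{
	uint32_t zone_sectors = 1U << 19;	/* 256 MiB zones */
	uint64_t sector = 3000000;

	printf("zone %u starting at sector %llu\n",
	       zone_no(sector, zone_sectors),
	       (unsigned long long)zone_start(sector, zone_sectors));
	return 0;
}
```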
|
raid5-ppl.c
    329  (data_sector >> ilog2(conf->chunk_sectors) ==  in ppl_log_stripe()
    330  data_sector_last >> ilog2(conf->chunk_sectors)) &&  in ppl_log_stripe()
    812  if ((pp_size >> 9) < conf->chunk_sectors) {  in ppl_recover_entry()
    821  (data_disks - 1) * conf->chunk_sectors +  in ppl_recover_entry()
    825  strip_sectors = conf->chunk_sectors;  in ppl_recover_entry()
    859  (disk * conf->chunk_sectors);  in ppl_recover_entry()
|
dm-raid.c
    718   mddev->new_chunk_sectors = mddev->chunk_sectors;  in rs_set_cur()
    731   mddev->chunk_sectors = mddev->new_chunk_sectors;  in rs_set_new()
    985   if (region_size < rs->md.chunk_sectors) {  in validate_region_size()
    1166  rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;  in parse_raid_params()
    1492  if (rs->md.chunk_sectors)  in parse_raid_params()
    1493  max_io_len = rs->md.chunk_sectors;  in parse_raid_params()
    1908  rs->md.new_chunk_sectors != rs->md.chunk_sectors;  in rs_is_layout_change()
    2150  sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);  in super_sync()
    2261  mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);  in super_init_validation()
    2316  if (mddev->chunk_sectors != mddev->new_chunk_sectors)  in super_init_validation()
    [all …]
|
dm-unstripe.c
    180  limits->chunk_sectors = uc->chunk_size;  in unstripe_io_hints()
|
raid10.c
    1803  dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1805  dev_start = first_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1810  dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1812  dev_end = last_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1915  seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);  in raid10_status()
    3124  window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;  in raid10_set_cluster_sync_high()
    3844  chunk = mddev->chunk_sectors;  in setup_geo()
    4011  lim.io_min = mddev->chunk_sectors << 9;  in raid10_set_queue_limits()
    4012  lim.chunk_sectors = mddev->chunk_sectors;  in raid10_set_queue_limits()
    4274  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid10_takeover_raid0()
    [all …]
|
md.c
    1383  mddev->chunk_sectors = sb->chunk_size >> 9;  in super_90_validate()
    1412  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_90_validate()
    1559  sb->chunk_size = mddev->chunk_sectors << 9;  in super_90_sync()
    4033  mddev->chunk_sectors = mddev->new_chunk_sectors;  in level_store()
    4264  mddev->chunk_sectors << 9);  in chunk_size_show()
    4295  mddev->chunk_sectors = n >> 9;  in chunk_size_store()
    5358  chunk = mddev->chunk_sectors;  in max_sync_store()
    5473  mddev->new_chunk_sectors = mddev->chunk_sectors;  in reshape_position_store()
    6441  mddev->chunk_sectors = 0;  in md_clean()
    6844  info.chunk_size = mddev->chunk_sectors << 9;  in get_array_info()
    [all …]
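
The << 9 / >> 9 pairs in md.c are byte/sector conversions: the v0.90 superblock and the md ioctl interface express the chunk size in bytes, while struct mddev keeps 512-byte sectors. A trivial sketch of the invariant:

```c
#include <stdint.h>
#include <assert.h>

/* md's on-disk v0.90 superblock stores the chunk size in bytes; the
 * in-core field is in 512-byte sectors, hence the << 9 / >> 9 pairs
 * around super_90_sync()/super_90_validate(). */
#define SECTOR_SHIFT 9

int main(void)
{
	uint32_t chunk_bytes = 64 * 1024;                      /* 64 KiB */
	uint32_t chunk_sectors = chunk_bytes >> SECTOR_SHIFT;  /* 128 */

	assert((chunk_sectors << SECTOR_SHIFT) == chunk_bytes);
	return 0;
}
```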
|
raid5.h
    575  int chunk_sectors;  (member)
|
dm-stripe.c
    461  limits->chunk_sectors = sc->chunk_size;  in stripe_io_hints()
|
md.h
    423  int chunk_sectors;  (member)
|
dm-table.c
    1825  zone_sectors = ti_limits.chunk_sectors;  in dm_calculate_queue_limits()
    1878  zone_sectors = limits->chunk_sectors;  in dm_calculate_queue_limits()
|
raid5-cache.c
    198  sector_div(sect, conf->chunk_sectors);  in r5c_tree_index()
    364  conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))  in r5c_check_cached_full_stripe()
|
raid1.c
    3370  if (mddev->chunk_sectors != mddev->new_chunk_sectors ||  in raid1_reshape()
    3373  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid1_reshape()
|
dm.c
    1817  unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors;  in __send_zone_reset_all_emulated()
|
/drivers/char/
ps3flash.c
    26   u64 chunk_sectors;  (member)
    38   start_sector, priv->chunk_sectors,  in ps3flash_read_write_sectors()
    118  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
    151  sector += priv->chunk_sectors;  in ps3flash_read()
    187  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
    226  sector += priv->chunk_sectors;  in ps3flash_write()
    376  priv->chunk_sectors = dev->bounce_size / dev->blk_size;  in ps3flash_probe()
|
/drivers/nvme/host/
zns.c
    115  lim->chunk_sectors = ns->head->zsze =  in nvme_update_zone_info()
|
core.c
    2165  lim->chunk_sectors = iob;  in nvme_set_chunk_sectors()
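
nvme_set_chunk_sectors() repurposes chunk_sectors for the namespace's preferred I/O boundary, so the block layer splits requests rather than letting one cross a boundary. The room left before the next boundary, assuming a power-of-two chunk size, is just the complement of the offset within the chunk (sketch; the helper name is illustrative, not a kernel API):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * What a chunk_sectors limit buys, in miniature: clamp an I/O so it
 * never crosses a chunk boundary. Power-of-two chunk size assumed.
 */
static uint32_t sectors_to_boundary(uint64_t offset, uint32_t chunk_sectors)
{
	return chunk_sectors - (uint32_t)(offset & (chunk_sectors - 1));
}

int main(void)
{
	/* 8 sectors remain before the next 128-sector boundary at 120 */
	printf("%u\n", sectors_to_boundary(120, 128));
	return 0;
}
```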
|
/drivers/block/
ublk_drv.c
    308   return p->dev_sectors >> ilog2(p->chunk_sectors);  in ublk_get_nr_zones()
    375   unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;  in ublk_report_zones()
    603   if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)  in ublk_validate_params()
    2798  .chunk_sectors = p->chunk_sectors,  in ublk_ctrl_start_dev()
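
ublk_get_nr_zones() shows the common idiom for deriving the zone count: with a power-of-two chunk_sectors, dividing the device size by the zone size reduces to a shift by ilog2(). A userspace sketch with a portable stand-in for the kernel's ilog2():

```c
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's ilog2(): floor(log2(v)). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t dev_sectors = 1ULL << 30;	/* 512 GiB device */
	uint32_t zone_sectors = 1U << 19;	/* 256 MiB zones */

	/* dev_sectors / zone_sectors, done as a shift: 2048 zones */
	printf("zones: %llu\n",
	       (unsigned long long)(dev_sectors >> ilog2_u32(zone_sectors)));
	return 0;
}
```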
|
zloop.c
    888   .chunk_sectors = opts->zone_size,  in zloop_ctl_add()
    1003  set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones);  in zloop_ctl_add()
|
virtio_blk.c
    760  lim->chunk_sectors = vblk->zone_sectors;  in virtblk_read_zoned_limits()
|
/drivers/scsi/
sd_zbc.c
    639  lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);  in sd_zbc_read_zones()
|
/drivers/block/null_blk/
zoned.c
    168  lim->chunk_sectors = dev->zone_size_sects;  in null_init_zoned_dev()
|