Lines matching references to bdev (Linux block layer zoned-device support, block/blk-zoned.c)

Each entry below gives the source line number, the matching line, and the enclosing function, noting whether bdev is a function argument or a local variable there.
155 int blkdev_report_zones(struct block_device *bdev, sector_t sector, in blkdev_report_zones() argument
158 struct gendisk *disk = bdev->bd_disk; in blkdev_report_zones()
166 if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones)) in blkdev_report_zones()
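The lines above show blkdev_report_zones() checking that the device is zoned and that the driver implements the report_zones method before delegating to it. A minimal caller sketch, assuming the standard report_zones_cb callback interface; the callback and helper names below are illustrative, not from the listing:

#include <linux/blkdev.h>
#include <linux/blkzoned.h>

/* Illustrative callback: count zones that are not empty. */
static int count_nonempty_zone_cb(struct blk_zone *zone, unsigned int idx,
                                  void *data)
{
        unsigned int *nonempty = data;

        if (zone->cond != BLK_ZONE_COND_EMPTY)
                (*nonempty)++;
        return 0;
}

/* Illustrative helper: report up to nr_zones zones starting at sector 0. */
static int example_count_nonempty_zones(struct block_device *bdev,
                                        unsigned int nr_zones)
{
        unsigned int nonempty = 0;
        int ret;

        ret = blkdev_report_zones(bdev, 0, nr_zones,
                                  count_nonempty_zone_cb, &nonempty);
        if (ret < 0)
                return ret;     /* error */
        return nonempty;        /* ret is the number of zones reported */
}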
177 static int blkdev_zone_reset_all(struct block_device *bdev) in blkdev_zone_reset_all() argument
181 bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC); in blkdev_zone_reset_all()
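blkdev_zone_reset_all() resets every zone on the device with a single REQ_OP_ZONE_RESET_ALL BIO built on the stack. A sketch of that synchronous on-stack bio pattern, reconstructing the surrounding lines that the listing does not show:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_reset_all_zones(struct block_device *bdev)
{
        struct bio bio;
        int ret;

        /* On-stack bio with no data pages, submitted synchronously. */
        bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
        ret = submit_bio_wait(&bio);
        bio_uninit(&bio);
        return ret;
}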
201 int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, in blkdev_zone_mgmt() argument
204 sector_t zone_sectors = bdev_zone_sectors(bdev); in blkdev_zone_mgmt()
205 sector_t capacity = bdev_nr_sectors(bdev); in blkdev_zone_mgmt()
210 if (!bdev_is_zoned(bdev)) in blkdev_zone_mgmt()
213 if (bdev_read_only(bdev)) in blkdev_zone_mgmt()
224 if (!bdev_is_zone_start(bdev, sector)) in blkdev_zone_mgmt()
227 if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity) in blkdev_zone_mgmt()
235 return blkdev_zone_reset_all(bdev); in blkdev_zone_mgmt()
238 bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL); in blkdev_zone_mgmt()
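blkdev_zone_mgmt() checks that the device is zoned and writable and that the requested range is zone-aligned (only the last zone may be cut short by the device capacity), falls back to blkdev_zone_reset_all() when a reset covers the whole device, and otherwise issues one zone-management BIO per zone. The declaration shown above takes no gfp_t argument; allocation uses GFP_KERNEL internally. A hedged example of resetting the zone containing a given sector, assuming the usual power-of-two zone size; the helper name is illustrative:

#include <linux/blkdev.h>

static int example_reset_one_zone(struct block_device *bdev, sector_t sector)
{
        sector_t zone_sectors = bdev_zone_sectors(bdev);
        /* Zone sizes are a power of two, so mask down to the zone start. */
        sector_t zone_start = sector & ~(zone_sectors - 1);

        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
                                zone_sectors);
}

The same call handles REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.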
272 int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, in blkdev_report_zones_ioctl() argument
283 if (!bdev_is_zoned(bdev)) in blkdev_report_zones_ioctl()
293 ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones, in blkdev_report_zones_ioctl()
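blkdev_report_zones_ioctl() implements the BLKREPORTZONE ioctl on top of blkdev_report_zones(), reading the caller's struct blk_zone_report header and filling the trailing zone array. A userspace sketch of the same interface, assuming a file descriptor opened on the zoned block device:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

/* Illustrative: report up to nr_zones zones starting at 'sector'. */
static int report_zones(int fd, __u64 sector, unsigned int nr_zones)
{
        struct blk_zone_report *rep;
        size_t sz = sizeof(*rep) + nr_zones * sizeof(struct blk_zone);
        unsigned int i;

        rep = calloc(1, sz);
        if (!rep)
                return -1;
        rep->sector = sector;
        rep->nr_zones = nr_zones;

        if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
                free(rep);
                return -1;
        }
        /* On return, rep->nr_zones holds the number of zones reported. */
        for (i = 0; i < rep->nr_zones; i++)
                printf("zone %u: start %llu wp %llu\n", i,
                       (unsigned long long)rep->zones[i].start,
                       (unsigned long long)rep->zones[i].wp);
        free(rep);
        return 0;
}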
305 static int blkdev_truncate_zone_range(struct block_device *bdev, in blkdev_truncate_zone_range() argument
311 zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk)) in blkdev_truncate_zone_range()
318 return truncate_bdev_range(bdev, mode, start, end); in blkdev_truncate_zone_range()
325 int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, in blkdev_zone_mgmt_ioctl() argument
336 if (!bdev_is_zoned(bdev)) in blkdev_zone_mgmt_ioctl()
350 inode_lock(bdev->bd_mapping->host); in blkdev_zone_mgmt_ioctl()
351 filemap_invalidate_lock(bdev->bd_mapping); in blkdev_zone_mgmt_ioctl()
352 ret = blkdev_truncate_zone_range(bdev, mode, &zrange); in blkdev_zone_mgmt_ioctl()
369 ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors); in blkdev_zone_mgmt_ioctl()
373 filemap_invalidate_unlock(bdev->bd_mapping); in blkdev_zone_mgmt_ioctl()
374 inode_unlock(bdev->bd_mapping->host); in blkdev_zone_mgmt_ioctl()
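blkdev_zone_mgmt_ioctl() backs the BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctls. For a reset it takes the inode lock and the invalidate lock on the bdev mapping and truncates the page cache over the range (blkdev_truncate_zone_range) before calling blkdev_zone_mgmt(), then releases the locks in reverse order. A userspace sketch, assuming the device was opened read-write:

#include <sys/ioctl.h>
#include <linux/blkzoned.h>

/* Illustrative: reset one zone given its start sector and length. */
static int reset_zone(int fd, __u64 zone_start, __u64 zone_len)
{
        struct blk_zone_range range = {
                .sector = zone_start,
                .nr_sectors = zone_len,
        };

        return ioctl(fd, BLKRESETZONE, &range);
}

BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE take the same struct blk_zone_range argument.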
1123 struct block_device *bdev = bio->bi_bdev; in blk_zone_plug_bio() local
1125 if (WARN_ON_ONCE(!bdev->bd_disk->zone_wplugs_hash)) in blk_zone_plug_bio()
1147 if (!bdev_emulates_zone_append(bdev)) { in blk_zone_plug_bio()
1159 bdev_zone_sectors(bdev)); in blk_zone_plug_bio()
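blk_zone_plug_bio() is the entry point of zone write plugging: it expects the disk's zone write plug hash to exist, lets devices with native zone append (bdev_emulates_zone_append() false) bypass emulation, and checks writes against the zone boundary derived from bdev_zone_sectors(). A hedged sketch of how a submission path might use it; the wrapper is illustrative and not how blk-mq actually structures the call:

#include <linux/blkdev.h>

/* Illustrative: nr_segs is the number of physical segments of the bio. */
static bool example_try_plug_zoned_write(struct bio *bio, unsigned int nr_segs)
{
        if (!bdev_is_zoned(bio->bi_bdev))
                return false;

        /*
         * Returns true when the bio has been queued in a zone write plug
         * and must not be submitted now; it is issued later from
         * blk_zone_wplug_bio_work().
         */
        return blk_zone_plug_bio(bio, nr_segs);
}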
1286 struct block_device *bdev; in blk_zone_wplug_bio_work() local
1314 bdev = bio->bi_bdev; in blk_zone_wplug_bio_work()
1322 if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) { in blk_zone_wplug_bio_work()
1323 bdev->bd_disk->fops->submit_bio(bio); in blk_zone_wplug_bio_work()
1324 blk_queue_exit(bdev->bd_disk->queue); in blk_zone_wplug_bio_work()
1816 int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, in blk_zone_issue_zeroout() argument
1821 if (WARN_ON_ONCE(!bdev_is_zoned(bdev))) in blk_zone_issue_zeroout()
1824 ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, in blk_zone_issue_zeroout()
1834 ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector); in blk_zone_issue_zeroout()
1842 return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0); in blk_zone_issue_zeroout()
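blk_zone_issue_zeroout() warns and bails out if the device is not zoned, tries blkdev_issue_zeroout() once, and on failure resynchronizes the zone write pointer offset via disk_zone_sync_wp_offset() before retrying with flags 0. A minimal caller sketch; the wrapper name is illustrative:

#include <linux/blkdev.h>

/* Illustrative: zero a sector range, using the zoned-aware path if needed. */
static int example_zeroout(struct block_device *bdev, sector_t sector,
                           sector_t nr_sects)
{
        if (bdev_is_zoned(bdev))
                return blk_zone_issue_zeroout(bdev, sector, nr_sects,
                                              GFP_KERNEL);
        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}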