Lines Matching refs:bdev
21 static int blkpg_do_ioctl(struct block_device *bdev, in blkpg_do_ioctl() argument
24 struct gendisk *disk = bdev->bd_disk; in blkpg_do_ioctl()
32 if (bdev_is_partition(bdev)) in blkpg_do_ioctl()
44 if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev))) in blkpg_do_ioctl()
67 static int blkpg_ioctl(struct block_device *bdev, in blkpg_ioctl() argument
76 return blkpg_do_ioctl(bdev, udata, op); in blkpg_ioctl()
87 static int compat_blkpg_ioctl(struct block_device *bdev, in compat_blkpg_ioctl() argument
96 return blkpg_do_ioctl(bdev, compat_ptr(udata), op); in compat_blkpg_ioctl()
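blkpg_do_ioctl() above is reached from both the native and compat BLKPG entry points; it checks alignment against the logical block size and rejects fds that refer to a partition rather than the whole disk. A minimal userspace sketch of adding a partition under those constraints (the device path, offsets, and partition number are hypothetical):

	#include <fcntl.h>
	#include <linux/blkpg.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDWR);	/* hypothetical whole-disk device */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		struct blkpg_partition part = {
			.start  = 1 << 20,	/* byte offset; must be logical-block aligned */
			.length = 64 << 20,	/* length in bytes */
			.pno    = 1,		/* partition number to create */
		};
		struct blkpg_ioctl_arg arg = {
			.op      = BLKPG_ADD_PARTITION,
			.datalen = sizeof(part),
			.data    = &part,
		};

		if (ioctl(fd, BLKPG, &arg) < 0)
			perror("BLKPG");	/* needs CAP_SYS_ADMIN */
		close(fd);
		return 0;
	}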
105 static int blk_validate_byte_range(struct block_device *bdev, in blk_validate_byte_range() argument
108 unsigned int bs_mask = bdev_logical_block_size(bdev) - 1; in blk_validate_byte_range()
115 if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev)) in blk_validate_byte_range()
121 static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, in blk_ioctl_discard() argument
135 if (!bdev_max_discard_sectors(bdev)) in blk_ioctl_discard()
140 if (bdev_read_only(bdev)) in blk_ioctl_discard()
142 err = blk_validate_byte_range(bdev, start, len); in blk_ioctl_discard()
146 inode_lock(bdev->bd_mapping->host); in blk_ioctl_discard()
147 filemap_invalidate_lock(bdev->bd_mapping); in blk_ioctl_discard()
148 err = truncate_bdev_range(bdev, mode, start, start + len - 1); in blk_ioctl_discard()
163 bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, in blk_ioctl_discard()
178 filemap_invalidate_unlock(bdev->bd_mapping); in blk_ioctl_discard()
179 inode_unlock(bdev->bd_mapping->host); in blk_ioctl_discard()
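The discard handler above takes a {start, length} byte range, validates it with blk_validate_byte_range(), truncates the page cache over the range under the invalidate lock, then issues discard bios. A hedged userspace sketch of BLKDISCARD (/dev/sdX is a placeholder; the range must be logical-block aligned):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_WRONLY);	/* hypothetical device */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		uint64_t range[2] = { 0, 1 << 20 };	/* discard first 1 MiB */
		if (ioctl(fd, BLKDISCARD, range) < 0)
			perror("BLKDISCARD");	/* EOPNOTSUPP without discard support */
		close(fd);
		return 0;
	}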
183 static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, in blk_ioctl_secure_erase() argument
192 if (!bdev_max_secure_erase_sectors(bdev)) in blk_ioctl_secure_erase()
202 end > bdev_nr_bytes(bdev)) in blk_ioctl_secure_erase()
205 inode_lock(bdev->bd_mapping->host); in blk_ioctl_secure_erase()
206 filemap_invalidate_lock(bdev->bd_mapping); in blk_ioctl_secure_erase()
207 err = truncate_bdev_range(bdev, mode, start, end - 1); in blk_ioctl_secure_erase()
209 err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, in blk_ioctl_secure_erase()
211 filemap_invalidate_unlock(bdev->bd_mapping); in blk_ioctl_secure_erase()
212 inode_unlock(bdev->bd_mapping->host); in blk_ioctl_secure_erase()
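Secure erase follows the same truncate-then-issue pattern but additionally requires bdev_max_secure_erase_sectors() and converts the byte range to sectors (the >> 9 shifts above). A sketch with a hypothetical device and a sector-aligned range:

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_WRONLY);	/* hypothetical device */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		uint64_t range[2] = { 0, 1 << 20 };	/* securely erase first 1 MiB */
		if (ioctl(fd, BLKSECDISCARD, range) < 0)
			perror("BLKSECDISCARD");	/* EOPNOTSUPP if unsupported */
		close(fd);
		return 0;
	}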
217 static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, in blk_ioctl_zeroout() argument
238 if (end >= (uint64_t)bdev_nr_bytes(bdev)) in blk_ioctl_zeroout()
244 inode_lock(bdev->bd_mapping->host); in blk_ioctl_zeroout()
245 filemap_invalidate_lock(bdev->bd_mapping); in blk_ioctl_zeroout()
246 err = truncate_bdev_range(bdev, mode, start, end); in blk_ioctl_zeroout()
250 err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, in blk_ioctl_zeroout()
254 filemap_invalidate_unlock(bdev->bd_mapping); in blk_ioctl_zeroout()
255 inode_unlock(bdev->bd_mapping->host); in blk_ioctl_zeroout()
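BLKZEROOUT likewise truncates the affected page-cache range before writing zeroes through blkdev_issue_zeroout(). Sketch (placeholder device, sector-aligned byte range):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_WRONLY);	/* hypothetical device */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		uint64_t range[2] = { 0, 1 << 20 };	/* zero first 1 MiB */
		if (ioctl(fd, BLKZEROOUT, range) < 0)
			perror("BLKZEROOUT");
		close(fd);
		return 0;
	}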
307 int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode, in blkdev_compat_ptr_ioctl() argument
310 struct gendisk *disk = bdev->bd_disk; in blkdev_compat_ptr_ioctl()
313 return disk->fops->ioctl(bdev, mode, cmd, in blkdev_compat_ptr_ioctl()
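blkdev_compat_ptr_ioctl() above is the exported helper a block driver plugs into .compat_ioctl when its ioctl ABI needs only pointer conversion for 32-bit callers; it forwards to the driver's native ->ioctl() with compat_ptr() applied. A fragment showing the usual wiring (my_ioctl and my_fops are hypothetical names, not from this file):

	#include <linux/blkdev.h>
	#include <linux/module.h>

	static int my_ioctl(struct block_device *bdev, blk_mode_t mode,
			    unsigned int cmd, unsigned long arg)
	{
		return -ENOTTY;		/* stub: no driver-private commands */
	}

	static const struct block_device_operations my_fops = {
		.owner		= THIS_MODULE,
		.ioctl		= my_ioctl,
		.compat_ioctl	= blkdev_compat_ptr_ioctl,	/* compat-clean forwarding */
	};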
321 static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode) in blkdev_pr_allowed() argument
324 if (bdev_is_partition(bdev)) in blkdev_pr_allowed()
336 static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode, in blkdev_pr_register() argument
339 const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_register()
342 if (!blkdev_pr_allowed(bdev, mode)) in blkdev_pr_register()
351 return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags); in blkdev_pr_register()
354 static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode, in blkdev_pr_reserve() argument
357 const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_reserve()
360 if (!blkdev_pr_allowed(bdev, mode)) in blkdev_pr_reserve()
369 return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags); in blkdev_pr_reserve()
372 static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode, in blkdev_pr_release() argument
375 const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_release()
378 if (!blkdev_pr_allowed(bdev, mode)) in blkdev_pr_release()
387 return ops->pr_release(bdev, rsv.key, rsv.type); in blkdev_pr_release()
390 static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode, in blkdev_pr_preempt() argument
393 const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_preempt()
396 if (!blkdev_pr_allowed(bdev, mode)) in blkdev_pr_preempt()
405 return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort); in blkdev_pr_preempt()
408 static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode, in blkdev_pr_clear() argument
411 const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_clear()
414 if (!blkdev_pr_allowed(bdev, mode)) in blkdev_pr_clear()
423 return ops->pr_clear(bdev, c.key); in blkdev_pr_clear()
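The five pr_ops wrappers above all gate on blkdev_pr_allowed(), which rejects partitions and requires write access or CAP_SYS_ADMIN. A hedged sketch registering a key and then taking a write-exclusive persistent reservation (key values and device are hypothetical):

	#include <fcntl.h>
	#include <linux/pr.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDWR);	/* hypothetical whole-disk device */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		struct pr_registration reg = {
			.old_key = 0,		/* 0: register a fresh key */
			.new_key = 0xabcd1234,	/* hypothetical reservation key */
		};
		if (ioctl(fd, IOC_PR_REGISTER, &reg) < 0)
			perror("IOC_PR_REGISTER");

		struct pr_reservation rsv = {
			.key  = 0xabcd1234,
			.type = PR_WRITE_EXCLUSIVE,
		};
		if (ioctl(fd, IOC_PR_RESERVE, &rsv) < 0)
			perror("IOC_PR_RESERVE");

		close(fd);
		return 0;
	}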
426 static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd, in blkdev_flushbuf() argument
432 mutex_lock(&bdev->bd_holder_lock); in blkdev_flushbuf()
433 if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync) in blkdev_flushbuf()
434 bdev->bd_holder_ops->sync(bdev); in blkdev_flushbuf()
436 mutex_unlock(&bdev->bd_holder_lock); in blkdev_flushbuf()
437 sync_blockdev(bdev); in blkdev_flushbuf()
440 invalidate_bdev(bdev); in blkdev_flushbuf()
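blkdev_flushbuf() above calls the holder's ->sync() if one is registered, then syncs and invalidates the block device's page cache. The userspace side is a bare BLKFLSBUF with no argument payload:

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical device */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKFLSBUF, 0) < 0)
			perror("BLKFLSBUF");	/* needs CAP_SYS_ADMIN */
		close(fd);
		return 0;
	}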
444 static int blkdev_roset(struct block_device *bdev, unsigned cmd, in blkdev_roset() argument
454 if (bdev->bd_disk->fops->set_read_only) { in blkdev_roset()
455 ret = bdev->bd_disk->fops->set_read_only(bdev, n); in blkdev_roset()
460 bdev_set_flag(bdev, BD_READ_ONLY); in blkdev_roset()
462 bdev_clear_flag(bdev, BD_READ_ONLY); in blkdev_roset()
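blkdev_roset() prefers the driver's ->set_read_only() hook and otherwise toggles BD_READ_ONLY directly. A userspace sketch pairing BLKROSET with BLKROGET (hypothetical device):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical device */
		int ro = 1;				/* 1: mark read-only */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKROSET, &ro) < 0)	/* needs CAP_SYS_ADMIN */
			perror("BLKROSET");
		if (ioctl(fd, BLKROGET, &ro) == 0)
			printf("read-only: %d\n", ro);
		close(fd);
		return 0;
	}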
466 static int blkdev_getgeo(struct block_device *bdev, in blkdev_getgeo() argument
469 struct gendisk *disk = bdev->bd_disk; in blkdev_getgeo()
483 geo.start = get_start_sect(bdev); in blkdev_getgeo()
484 ret = disk->fops->getgeo(bdev, &geo); in blkdev_getgeo()
500 static int compat_hdio_getgeo(struct block_device *bdev, in compat_hdio_getgeo() argument
503 struct gendisk *disk = bdev->bd_disk; in compat_hdio_getgeo()
517 geo.start = get_start_sect(bdev); in compat_hdio_getgeo()
518 ret = disk->fops->getgeo(bdev, &geo); in compat_hdio_getgeo()
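Both geometry paths above fill geo.start from get_start_sect() before letting the driver's ->getgeo() supply the rest. Userspace sketch of HDIO_GETGEO (hypothetical device):

	#include <fcntl.h>
	#include <linux/hdreg.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical device */
		struct hd_geometry geo;
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, HDIO_GETGEO, &geo) == 0)
			printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
			       geo.heads, geo.sectors, geo.cylinders, geo.start);
		else
			perror("HDIO_GETGEO");	/* ENOTTY if no ->getgeo() */
		close(fd);
		return 0;
	}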
564 static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode, in blkdev_common_ioctl() argument
572 return blkdev_flushbuf(bdev, cmd, arg); in blkdev_common_ioctl()
574 return blkdev_roset(bdev, cmd, arg); in blkdev_common_ioctl()
576 return blk_ioctl_discard(bdev, mode, arg); in blkdev_common_ioctl()
578 return blk_ioctl_secure_erase(bdev, mode, argp); in blkdev_common_ioctl()
580 return blk_ioctl_zeroout(bdev, mode, arg); in blkdev_common_ioctl()
582 return put_u64(argp, bdev->bd_disk->diskseq); in blkdev_common_ioctl()
584 return blkdev_report_zones_ioctl(bdev, cmd, arg); in blkdev_common_ioctl()
589 return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg); in blkdev_common_ioctl()
591 return put_uint(argp, bdev_zone_sectors(bdev)); in blkdev_common_ioctl()
593 return put_uint(argp, bdev_nr_zones(bdev)); in blkdev_common_ioctl()
595 return put_int(argp, bdev_read_only(bdev) != 0); in blkdev_common_ioctl()
597 return put_int(argp, bdev_logical_block_size(bdev)); in blkdev_common_ioctl()
599 return put_uint(argp, bdev_physical_block_size(bdev)); in blkdev_common_ioctl()
601 return put_uint(argp, bdev_io_min(bdev)); in blkdev_common_ioctl()
603 return put_uint(argp, bdev_io_opt(bdev)); in blkdev_common_ioctl()
605 return put_int(argp, bdev_alignment_offset(bdev)); in blkdev_common_ioctl()
610 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_common_ioctl()
613 return put_ushort(argp, !bdev_nonrot(bdev)); in blkdev_common_ioctl()
618 bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE; in blkdev_common_ioctl()
623 if (bdev_is_partition(bdev)) in blkdev_common_ioctl()
625 return disk_scan_partitions(bdev->bd_disk, in blkdev_common_ioctl()
630 return blk_trace_ioctl(bdev, cmd, argp); in blkdev_common_ioctl()
634 return blk_crypto_ioctl(bdev, cmd, argp); in blkdev_common_ioctl()
636 return blkdev_pr_register(bdev, mode, argp); in blkdev_common_ioctl()
638 return blkdev_pr_reserve(bdev, mode, argp); in blkdev_common_ioctl()
640 return blkdev_pr_release(bdev, mode, argp); in blkdev_common_ioctl()
642 return blkdev_pr_preempt(bdev, mode, argp, false); in blkdev_common_ioctl()
644 return blkdev_pr_preempt(bdev, mode, argp, true); in blkdev_common_ioctl()
646 return blkdev_pr_clear(bdev, mode, argp); in blkdev_common_ioctl()
648 return blk_get_meta_cap(bdev, cmd, argp); in blkdev_common_ioctl()
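A sketch of the read-only topology queries dispatched by blkdev_common_ioctl() above; note the mixed result widths visible in the put_int/put_uint/put_u64 calls (BLKSSZGET returns an int, BLKPBSZGET/BLKIOMIN/BLKIOOPT an unsigned int, BLKGETDISKSEQ a u64):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical device */
		int lbs;
		unsigned int pbs, io_min, io_opt;
		uint64_t seq;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKSSZGET, &lbs) == 0 &&
		    ioctl(fd, BLKPBSZGET, &pbs) == 0 &&
		    ioctl(fd, BLKIOMIN, &io_min) == 0 &&
		    ioctl(fd, BLKIOOPT, &io_opt) == 0)
			printf("lbs=%d pbs=%u min=%u opt=%u\n",
			       lbs, pbs, io_min, io_opt);
		if (ioctl(fd, BLKGETDISKSEQ, &seq) == 0)
			printf("diskseq=%llu\n", (unsigned long long)seq);
		close(fd);
		return 0;
	}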
660 struct block_device *bdev = I_BDEV(file->f_mapping->host); in blkdev_ioctl() local
668 return blkdev_getgeo(bdev, argp); in blkdev_ioctl()
670 return blkpg_ioctl(bdev, argp); in blkdev_ioctl()
678 (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512); in blkdev_ioctl()
680 if (bdev_nr_sectors(bdev) > ~0UL) in blkdev_ioctl()
682 return put_ulong(argp, bdev_nr_sectors(bdev)); in blkdev_ioctl()
686 return put_int(argp, block_size(bdev)); in blkdev_ioctl()
690 return put_u64(argp, bdev_nr_bytes(bdev)); in blkdev_ioctl()
694 return blk_trace_ioctl(bdev, cmd, argp); in blkdev_ioctl()
699 ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp); in blkdev_ioctl()
703 if (!bdev->bd_disk->fops->ioctl) in blkdev_ioctl()
705 return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); in blkdev_ioctl()
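The size queries handled directly in blkdev_ioctl() above differ in width: BLKGETSIZE reports 512-byte sectors in an unsigned long (and fails on devices whose sector count exceeds ~0UL, per the bdev_nr_sectors() check), while BLKGETSIZE64 reports bytes in a u64, so new code should prefer the latter. Sketch (hypothetical device):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical device */
		unsigned long sectors;
		uint64_t bytes;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKGETSIZE, &sectors) == 0)
			printf("sectors: %lu\n", sectors);
		if (ioctl(fd, BLKGETSIZE64, &bytes) == 0)
			printf("bytes:   %llu\n", (unsigned long long)bytes);
		close(fd);
		return 0;
	}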
721 struct block_device *bdev = I_BDEV(file->f_mapping->host); in compat_blkdev_ioctl() local
722 struct gendisk *disk = bdev->bd_disk; in compat_blkdev_ioctl()
728 return compat_hdio_getgeo(bdev, argp); in compat_blkdev_ioctl()
730 return compat_blkpg_ioctl(bdev, argp); in compat_blkdev_ioctl()
738 (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512); in compat_blkdev_ioctl()
740 if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0) in compat_blkdev_ioctl()
742 return compat_put_ulong(argp, bdev_nr_sectors(bdev)); in compat_blkdev_ioctl()
746 return put_int(argp, bdev_logical_block_size(bdev)); in compat_blkdev_ioctl()
750 return put_u64(argp, bdev_nr_bytes(bdev)); in compat_blkdev_ioctl()
754 return blk_trace_ioctl(bdev, cmd, argp); in compat_blkdev_ioctl()
759 ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp); in compat_blkdev_ioctl()
761 ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); in compat_blkdev_ioctl()
795 struct block_device *bdev, in blkdev_cmd_discard() argument
805 if (!bdev_max_discard_sectors(bdev)) in blkdev_cmd_discard()
809 if (bdev_read_only(bdev)) in blkdev_cmd_discard()
811 err = blk_validate_byte_range(bdev, start, len); in blkdev_cmd_discard()
815 err = filemap_invalidate_pages(bdev->bd_mapping, start, in blkdev_cmd_discard()
821 bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp); in blkdev_cmd_discard()
853 struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host); in blkdev_uring_cmd() local
871 return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait); in blkdev_uring_cmd()
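The uring-cmd discard path above can be driven from userspace through liburing; a hedged sketch assuming liburing >= 2.8, which provides io_uring_prep_cmd_discard() for BLOCK_URING_CMD_DISCARD. The offset and length are hypothetical and must satisfy the same blk_validate_byte_range() alignment rules as BLKDISCARD:

	#include <fcntl.h>
	#include <liburing.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int fd, ret;

		fd = open("/dev/sdX", O_WRONLY);	/* hypothetical device */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_cmd_discard(sqe, fd, 0, 1 << 20);	/* discard first 1 MiB */
		io_uring_submit(&ring);

		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret == 0) {
			if (cqe->res < 0)
				fprintf(stderr, "discard failed: %d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		close(fd);
		return 0;
	}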