/block/
bdev.c
  76: bdev, ret);  in bdev_write_inode()
  246: if (!bdev)  in sync_blockdev_nowait()
  258: if (!bdev)  in sync_blockdev()
  298: error = bdev->bd_holder_ops->freeze(bdev);  in bdev_freeze()
  341: error = bdev->bd_holder_ops->thaw(bdev);  in bdev_thaw()
  368: memset(&ei->bdev, 0, sizeof(ei->bdev));  in bdev_alloc_inode()
  386: if (bdev->bd_disk && bdev->bd_disk->bdi)  in bdev_free_inode()
  721: bdev->bd_disk->fops->release(bdev->bd_disk);  in blkdev_put_whole()
  835: bdev = &BDEV_I(inode)->bdev;  in blkdev_get_no_open()
  1047: if (!bdev)  in bdev_file_open_by_dev()
  [all …]
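The freeze/thaw hits above show `bdev_freeze()` and `bdev_thaw()` delegating to the holder's ops (typically a mounted filesystem) when one is registered. A minimal sketch of quiescing a device around maintenance, assuming the caller already holds a `bdev` reference; `with_frozen_bdev()` is a hypothetical helper:

```c
#include <linux/blkdev.h>

/* Hypothetical helper: quiesce a device around maintenance work. */
static int with_frozen_bdev(struct block_device *bdev)
{
	int error;

	/* Blocks new writes and syncs dirty data; invokes the holder's
	 * ->freeze() op (e.g. the filesystem's) when one is set. */
	error = bdev_freeze(bdev);
	if (error)
		return error;

	/* ... perform maintenance while I/O is frozen ... */

	return bdev_thaw(bdev);
}
```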
|
ioctl.c
  32: if (bdev_is_partition(bdev))  in blkpg_do_ioctl()
  140: if (bdev_read_only(bdev))  in blk_ioctl_discard()
  202: end > bdev_nr_bytes(bdev))  in blk_ioctl_secure_erase()
  324: if (bdev_is_partition(bdev))  in blkdev_pr_allowed()
  433: if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)  in blkdev_flushbuf()
  434: bdev->bd_holder_ops->sync(bdev);  in blkdev_flushbuf()
  437: sync_blockdev(bdev);  in blkdev_flushbuf()
  440: invalidate_bdev(bdev);  in blkdev_flushbuf()
  455: ret = bdev->bd_disk->fops->set_read_only(bdev, n);  in blkdev_roset()
  705: return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);  in blkdev_ioctl()
  [all …]
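`blk_ioctl_discard()` backs the userspace `BLKDISCARD` ioctl, which takes a byte-based `{start, length}` range and requires a write-opened, writable device. A hedged userspace sketch (the device path is illustrative):

```c
#include <fcntl.h>
#include <linux/fs.h>	/* BLKDISCARD */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* {start, length} in bytes; both must be sector-aligned. */
	uint64_t range[2] = { 0, 1024 * 1024 };	/* first 1 MiB */
	int fd = open("/dev/sdX", O_WRONLY);	/* illustrative device */

	if (fd < 0 || ioctl(fd, BLKDISCARD, &range) != 0)
		perror("BLKDISCARD");
	if (fd >= 0)
		close(fd);
	return 0;
}
```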
|
blk-lib.c
  18: if (bdev_is_partition(bdev))  in bio_discard_limit()
  19: sector += bdev->bd_start_sect;  in bio_discard_limit()
  47: bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);  in blk_alloc_discard_bio()
  110: return min(bdev_write_zeroes_sectors(bdev),  in bio_write_zeroes_limit()
  151: sector_t limit = bio_write_zeroes_limit(bdev);  in blkdev_issue_write_zeroes()
  177: if (ret && !bdev_write_zeroes_sectors(bdev))  in blkdev_issue_write_zeroes()
  277: sector_t limit = bio_write_zeroes_limit(bdev);  in __blkdev_issue_zeroout()
  279: if (bdev_read_only(bdev))  in __blkdev_issue_zeroout()
  315: if (bdev_read_only(bdev))  in blkdev_issue_zeroout()
  318: if (bdev_write_zeroes_sectors(bdev)) {  in blkdev_issue_zeroout()
  [all …]
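These routines implement the kernel's zeroing helper: `blkdev_issue_zeroout()` prefers hardware `REQ_OP_WRITE_ZEROES` and otherwise falls back to writing zero pages, unless a flag forbids it. A sketch assuming a held `bdev` reference; `zero_range()` is a hypothetical wrapper:

```c
#include <linux/blkdev.h>

/* Zero a sector range, preferring hardware write-zeroes when available. */
static int zero_range(struct block_device *bdev, sector_t sector,
		      sector_t nr_sects)
{
	/* BLKDEV_ZERO_NOUNMAP asks the device to keep blocks allocated;
	 * BLKDEV_ZERO_NOFALLBACK would fail instead of writing zeroes. */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}
```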
|
holder.c
  70: if (bdev->bd_disk == disk)  in bd_link_disk_holder()
  77: mutex_lock(&bdev->bd_disk->open_mutex);  in bd_link_disk_holder()
  78: if (!disk_live(bdev->bd_disk)) {  in bd_link_disk_holder()
  79: mutex_unlock(&bdev->bd_disk->open_mutex);  in bd_link_disk_holder()
  82: kobject_get(bdev->bd_holder_dir);  in bd_link_disk_holder()
  83: mutex_unlock(&bdev->bd_disk->open_mutex);  in bd_link_disk_holder()
  86: WARN_ON_ONCE(!bdev->bd_holder);  in bd_link_disk_holder()
  88: holder = bd_find_holder_disk(bdev, disk);  in bd_link_disk_holder()
  90: kobject_put(bdev->bd_holder_dir);  in bd_link_disk_holder()
  103: holder->holder_dir = bdev->bd_holder_dir;  in bd_link_disk_holder()
  [all …]
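`bd_link_disk_holder()` is the (legacy, but still used by device-mapper) mechanism by which a stacking driver publishes its holder relationship in sysfs. A hedged sketch; `claim_component()` is a hypothetical wrapper:

```c
#include <linux/blkdev.h>

/* Sketch: record that 'disk' sits on top of component 'bdev', creating
 * the /sys/block/<component>/holders/ and <disk>/slaves/ links. */
static int claim_component(struct block_device *bdev, struct gendisk *disk)
{
	int ret = bd_link_disk_holder(bdev, disk);

	if (ret)
		return ret;
	/* ... use the component; on teardown, undo the link with
	 * bd_unlink_disk_holder(bdev, disk) before releasing it ... */
	return 0;
}
```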
|
fops.c
  42: !bdev_iter_is_aligned(bdev, iter);  in blkdev_dio_invalid()
  319: struct block_device *bdev,  in __blkdev_direct_IO_async() (argument)
  445: iomap->bdev = bdev;  in blkdev_iomap_begin()
  624: error = blkdev_issue_flush(bdev);  in blkdev_fsync()
  672: struct block_device *bdev;  in blkdev_open() (local)
  685: if (!bdev)  in blkdev_open()
  688: if (bdev_can_atomic_write(bdev))  in blkdev_open()
  693: blkdev_put_no_open(bdev);  in blkdev_open()
  746: loff_t size = bdev_nr_bytes(bdev);  in blkdev_write_iter()
  750: if (bdev_read_only(bdev))  in blkdev_write_iter()
  [all …]
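The `blkdev_dio_invalid()` check above is why userspace `O_DIRECT` I/O on a block device must align buffer, offset, and length to the logical block size. An illustrative userspace sketch (the device path and 4096-byte block size are assumptions):

```c
#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf = NULL;
	int fd = open("/dev/sdX", O_RDONLY | O_DIRECT); /* illustrative */

	if (fd < 0)
		return 1;
	/* Offset, length and buffer must all be logical-block aligned,
	 * or the kernel rejects the request with -EINVAL. */
	if (posix_memalign(&buf, 4096, 4096) == 0 &&
	    pread(fd, buf, 4096, 0) == 4096)
		puts("read first 4 KiB");
	free(buf);
	close(fd);
	return 0;
}
```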
|
blk-zoned.c
  158: struct gendisk *disk = bdev->bd_disk;  in blkdev_report_zones()
  210: if (!bdev_is_zoned(bdev))  in blkdev_zone_mgmt()
  213: if (bdev_read_only(bdev))  in blkdev_zone_mgmt()
  224: if (!bdev_is_zone_start(bdev, sector))  in blkdev_zone_mgmt()
  235: return blkdev_zone_reset_all(bdev);  in blkdev_zone_mgmt()
  283: if (!bdev_is_zoned(bdev))  in blkdev_report_zones_ioctl()
  336: if (!bdev_is_zoned(bdev))  in blkdev_zone_mgmt_ioctl()
  350: inode_lock(bdev->bd_mapping->host);  in blkdev_zone_mgmt_ioctl()
  1159: bdev_zone_sectors(bdev));  in blk_zone_plug_bio()
  1286: struct block_device *bdev;  in blk_zone_wplug_bio_work() (local)
  [all …]
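`blkdev_report_zones()` walks a device's zones through a caller-supplied `report_zones_cb` callback. A sketch counting explicitly open zones, assuming a zoned device; both helper names are hypothetical:

```c
#include <linux/blkdev.h>
#include <linux/blkzoned.h>

/* Callback invoked once per reported zone. */
static int count_open_zone(struct blk_zone *zone, unsigned int idx,
			   void *data)
{
	unsigned int *open_zones = data;

	if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
		(*open_zones)++;
	return 0;
}

/* Count explicitly open zones across the whole device. */
static int count_open_zones(struct block_device *bdev, unsigned int *out)
{
	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				      count_open_zone, out);

	return ret < 0 ? ret : 0;	/* >= 0 is the zone count */
}
```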
|
blk-crypto.c
  397: bool blk_crypto_config_supported(struct block_device *bdev,  in blk_crypto_config_supported() (argument)
  403: return blk_crypto_config_supported_natively(bdev, cfg);  in blk_crypto_config_supported()
  423: int blk_crypto_start_using_key(struct block_device *bdev,  in blk_crypto_start_using_key() (argument)
  426: if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))  in blk_crypto_start_using_key()
  429: pr_warn_ratelimited("%pg: no support for wrapped keys\n", bdev);  in blk_crypto_start_using_key()
  450: void blk_crypto_evict_key(struct block_device *bdev,  in blk_crypto_evict_key() (argument)
  453: struct request_queue *q = bdev_get_queue(bdev);  in blk_crypto_evict_key()
  456: if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))  in blk_crypto_evict_key()
  469: pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);  in blk_crypto_evict_key()
  595: int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,  in blk_crypto_ioctl() (argument)
  [all …]
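`blk_crypto_start_using_key()` and `blk_crypto_evict_key()` bracket an upper layer's use of an inline-encryption key (fscrypt is the main caller). A hedged sketch, assuming `key` was initialized earlier with `blk_crypto_init_key()`; `use_inline_crypto()` is hypothetical:

```c
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>

/* Sketch: declare intent to use 'key' on 'bdev', then evict it. */
static int use_inline_crypto(struct block_device *bdev,
			     const struct blk_crypto_key *key)
{
	int err = blk_crypto_start_using_key(bdev, key);

	if (err)
		return err;

	/* ... submit encrypted bios carrying this key ... */

	/* Returns void; eviction errors are only logged (see line 469). */
	blk_crypto_evict_key(bdev, key);
	return 0;
}
```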
|
blk-core.c
  779: struct block_device *bdev = bio->bi_bdev;  in submit_bio_noacct() (local)
  798: if (bdev_is_partition(bdev) &&  in submit_bio_noacct()
  811: if (!bdev_write_cache(bdev)) {  in submit_bio_noacct()
  837: if (!bdev_max_discard_sectors(bdev))  in submit_bio_noacct()
  935: struct block_device *bdev;  in bio_poll() (local)
  939: bdev = READ_ONCE(bio->bi_bdev);  in bio_poll()
  940: if (!bdev)  in bio_poll()
  943: q = bdev_get_queue(bdev);  in bio_poll()
  1034: update_io_ticks(bdev, start_time, false);  in bdev_start_io_acct()
  1062: update_io_ticks(bdev, now, true);  in bdev_end_io_acct()
  [all …]
|
blk-cgroup.c
  786: struct block_device *bdev;  in blkg_conf_open_bdev() (local)
  789: if (ctx->bdev)  in blkg_conf_open_bdev()
  801: if (!bdev)  in blkg_conf_open_bdev()
  804: blkdev_put_no_open(bdev);  in blkg_conf_open_bdev()
  810: blkdev_put_no_open(bdev);  in blkg_conf_open_bdev()
  816: ctx->bdev = bdev;  in blkg_conf_open_bdev()
  833: if (ctx->bdev)  in blkg_conf_open_bdev_frozen()
  883: disk = ctx->bdev->bd_disk;  in blkg_conf_prep()
  1001: if (ctx->bdev) {  in blkg_conf_exit()
  1005: ctx->bdev = NULL;  in blkg_conf_exit()
  [all …]
|
early-lookup.c
  23: struct block_device *bdev = dev_to_bdev(dev);  in match_dev_by_uuid() (local)
  26: if (!bdev->bd_meta_info ||  in match_dev_by_uuid()
  27: strncasecmp(cmp->uuid, bdev->bd_meta_info->uuid, cmp->len))  in match_dev_by_uuid()
  104: struct block_device *bdev = dev_to_bdev(dev);  in match_dev_by_label() (local)
  107: if (!bdev->bd_meta_info || strcmp(label, bdev->bd_meta_info->volname))  in match_dev_by_label()
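These comparators back early (pre-initramfs) root-device lookup against partition metadata: `match_dev_by_uuid()` against the partition UUID, `match_dev_by_label()` against the GPT partition name. On the kernel command line (values here are illustrative):

```
root=PARTUUID=0e3dbd17-02    # matched by match_dev_by_uuid()
root=PARTLABEL=rootfs        # matched by match_dev_by_label()
```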
|
blk.h
  109: void blkdev_put_no_open(struct block_device *bdev);
  508: int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
  510: int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
  544: static inline int blkdev_report_zones_ioctl(struct block_device *bdev,  in blkdev_report_zones_ioctl() (argument)
  549: static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,  in blkdev_zone_mgmt_ioctl() (argument)
  557: void bdev_add(struct block_device *bdev, dev_t dev);
  558: void bdev_unhash(struct block_device *bdev);
  559: void bdev_drop(struct block_device *bdev);
  574: void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);
  607: int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
  [all …]
|
bio.c
  249: bio->bi_bdev = bdev;  in bio_init()
  265: if (bdev)  in bio_init()
  306: bio->bi_bdev = bdev;  in bio_reset()
  518: bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,  in bio_alloc_bioset()
  579: bio_init(bio, bdev, bvl, nr_vecs, opf);  in bio_alloc_bioset()
  581: bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);  in bio_alloc_bioset()
  583: bio_init(bio, bdev, NULL, 0, opf);  in bio_alloc_bioset()
  866: bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);  in bio_alloc_clone()
  892: int bio_init_clone(struct block_device *bdev, struct bio *bio,  in bio_init_clone() (argument)
  897: bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);  in bio_init_clone()
  [all …]
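These are the allocation and (re)initialization entry points for `struct bio`. A sketch of the common synchronous pattern built on them, pairing `bio_alloc()` with `submit_bio_wait()`; `read_one_page()` is a hypothetical helper, and the caller is assumed to hold a `bdev` reference:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Synchronously read one page starting at 'sector'. */
static int read_one_page(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct bio *bio;
	int ret;

	/* With GFP_KERNEL (direct reclaim allowed) this cannot fail. */
	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = sector;
	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
		bio_put(bio);
		return -EIO;
	}
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
```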
|
blk-settings.c
  936: if (blk_stack_limits(t, bdev_limits(bdev),  in queue_limits_stack_bdev()
  937: get_start_sect(bdev) + offset))  in queue_limits_stack_bdev()
  939: pfx, bdev);  in queue_limits_stack_bdev()
  1009: int bdev_alignment_offset(struct block_device *bdev)  in bdev_alignment_offset() (argument)
  1011: struct request_queue *q = bdev_get_queue(bdev);  in bdev_alignment_offset()
  1015: if (bdev_is_partition(bdev))  in bdev_alignment_offset()
  1017: bdev->bd_start_sect);  in bdev_alignment_offset()
  1022: unsigned int bdev_discard_alignment(struct block_device *bdev)  in bdev_discard_alignment() (argument)
  1024: struct request_queue *q = bdev_get_queue(bdev);  in bdev_discard_alignment()
  1026: if (bdev_is_partition(bdev))  in bdev_discard_alignment()
  [all …]
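`queue_limits_stack_bdev()` is how a stacking driver folds a component device's limits into the `queue_limits` it is assembling for its own disk. A hedged sketch under those assumptions; `build_top_limits()` and the `"dm-foo"` prefix are hypothetical:

```c
#include <linux/blkdev.h>

/* Sketch: build stacked limits from one component device. */
static void build_top_limits(struct queue_limits *lim,
			     struct block_device *component)
{
	blk_set_stacking_limits(lim);	/* permissive stacking defaults */
	/* offset 0: the component is mapped from its first sector;
	 * "dm-foo" only prefixes the misalignment warning (see line 939). */
	queue_limits_stack_bdev(lim, component, 0, "dm-foo");
}
```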
|
genhd.c
  633: struct block_device *bdev;  in blk_report_disk_dead() (local)
  644: xa_for_each(&disk->part_tbl, idx, bdev) {  in blk_report_disk_dead()
  649: bdev_mark_dead(bdev, surprise);  in blk_report_disk_dead()
  651: put_device(&bdev->bd_device);  in blk_report_disk_dead()
  842: struct block_device *bdev = disk->part0;  in invalidate_disk() (local)
  844: invalidate_bdev(bdev);  in invalidate_disk()
  845: bdev->bd_mapping->wb_err = 0;  in invalidate_disk()
  1065: struct block_device *bdev = dev_to_bdev(dev);  in part_stat_show() (local)
  1069: inflight = bdev_count_inflight(bdev);  in part_stat_show()
  1072: update_io_ticks(bdev, jiffies, true);  in part_stat_show()
  [all …]
|
blk-crypto-internal.h
  86: int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
  136: static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,  in blk_crypto_ioctl() (argument)
|
blk-integrity.c
  58: int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,  in blk_get_meta_cap() (argument)
  61: struct blk_integrity *bi = blk_get_integrity(bdev->bd_disk);  in blk_get_meta_cap()
|
blk-flush.c
  468: int blkdev_issue_flush(struct block_device *bdev)  in blkdev_issue_flush() (argument)
  472: bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);  in blkdev_issue_flush()
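The two hits above are essentially the whole story: `blkdev_issue_flush()` is an on-stack preflush bio submitted synchronously. An open-coded equivalent, for illustration only (`flush_volatile_cache()` is hypothetical):

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Make previously completed writes durable: an empty REQ_PREFLUSH
 * write, submitted synchronously (mirrors blkdev_issue_flush()). */
static int flush_volatile_cache(struct block_device *bdev)
{
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return ret;
}
```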
|
blk-crypto-profile.c
  484: int blk_crypto_derive_sw_secret(struct block_device *bdev,  in blk_crypto_derive_sw_secret() (argument)
  489: bdev_get_queue(bdev)->crypto_profile;  in blk_crypto_derive_sw_secret()
|
Makefile
  6: obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
|
blk-iolatency.c
  852: lockdep_assert_held(&ctx.bdev->bd_queue->rq_qos_mutex);  in iolatency_set_limit()
  853: if (!iolat_rq_qos(ctx.bdev->bd_queue))  in iolatency_set_limit()
  854: ret = blk_iolatency_init(ctx.bdev->bd_disk);  in iolatency_set_limit()
|
blk-throttle.c
  1379: if (!blk_throtl_activated(ctx.bdev->bd_queue)) {  in tg_set_conf()
  1380: ret = blk_throtl_init(ctx.bdev->bd_disk);  in tg_set_conf()
  1563: if (!blk_throtl_activated(ctx.bdev->bd_queue)) {  in tg_set_limit()
  1564: ret = blk_throtl_init(ctx.bdev->bd_disk);  in tg_set_limit()
|
blk-cgroup.h
  216: struct block_device *bdev;  (member)
|
/block/partitions/
core.c
  300: struct block_device *bdev;  in add_partition() (local)
  326: bdev = bdev_alloc(disk, partno);  in add_partition()
  327: if (!bdev)  in add_partition()
  330: bdev->bd_start_sect = start;  in add_partition()
  331: bdev_set_nr_sectors(bdev, len);  in add_partition()
  333: pdev = &bdev->bd_device;  in add_partition()
  359: if (!bdev->bd_meta_info)  in add_partition()
  371: if (!bdev->bd_holder_dir)  in add_partition()
  388: bdev_add(bdev, devt);  in add_partition()
  393: return bdev;  in add_partition()
  [all …]
|
ibm.c
  329: struct block_device *bdev = disk->part0;  in ibm_partition() (local)
  344: blocksize = bdev_logical_block_size(bdev);  in ibm_partition()
  347: nr_sectors = bdev_nr_sectors(bdev);  in ibm_partition()
  360: geo->start = get_start_sect(bdev);  in ibm_partition()
  361: if (disk->fops->getgeo(bdev, geo))  in ibm_partition()
|
cmdline.c
  218: const char *bdev)  in cmdline_parts_find() (argument)
  220: while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))  in cmdline_parts_find()
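`cmdline_parts_find()` looks a device up by name in the parsed `blkdevparts=` boot parameter. Its documented shape is `<blkdev-id>:<partdef>[,<partdef>]`, with multiple devices separated by `;` (see Documentation/block/cmdline-partition.rst); device and partition names below are illustrative:

```
blkdevparts=mmcblk0:1m(boot),512k(env),-(data);mmcblk1:1g(rootfs),-(media)
```

Sizes take k/m/g suffixes, `-` means "the rest of the device", and the parenthesized string names the partition.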
|