
Searched refs:bi_bdev (Results 1 – 25 of 60) sorted by relevance


/linux/drivers/md/
dm-bio-record.h:23 struct block_device *bi_bdev; member
35 bd->bi_bdev = bio->bi_bdev; in dm_bio_record()
47 bio->bi_bdev = bd->bi_bdev; in dm_bio_restore()
dm-audit.c:73 int dev_major = MAJOR(bio->bi_bdev->bd_dev); in dm_audit_log_bio()
74 int dev_minor = MINOR(bio->bi_bdev->bd_dev); in dm_audit_log_bio()
dm-io-rewind.c:59 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in dm_bio_integrity_rewind()
146 struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig, in dm_io_rewind()
raid1-10.c:119 struct md_rdev *rdev = (void *)bio->bi_bdev; in raid1_submit_write()
126 !bdev_max_discard_sectors(bio->bi_bdev))) in raid1_submit_write()
dm-raid1.c:1205 bio_record->details.bi_bdev = NULL; in mirror_map()
1272 if (!bio_record->details.bi_bdev) { in mirror_end_io()
1297 bio_record->details.bi_bdev = NULL; in mirror_end_io()
1307 bio_record->details.bi_bdev = NULL; in mirror_end_io()
md.h:697 md_sync_acct(bio->bi_bdev, nr_sectors); in md_sync_acct_bio()
942 !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors) in mddev_check_write_zeroes()
raid5-ppl.c:423 bio->bi_bdev); in ppl_submit_iounit_bio()
495 bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS, in ppl_submit_iounit()
588 pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev); in ppl_flush_endio()
636 pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev); in ppl_do_flush()
/linux/block/
blk-core.c:330 struct gendisk *disk = bio->bi_bdev->bd_disk; in __bio_queue_enter()
508 if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED)) in bio_check_ro()
511 bdev_set_flag(bio->bi_bdev, BD_RO_WARNED); in bio_check_ro()
518 bio->bi_bdev); in bio_check_ro()
557 struct block_device *p = bio->bi_bdev; in blk_partition_remap()
580 if (!bdev_is_zoned(bio->bi_bdev)) in blk_check_zone_append()
617 struct gendisk *disk = bio->bi_bdev->bd_disk; in __submit_bio()
673 if (q == bdev_get_queue(bio->bi_bdev)) in __submit_bio_noacct()
753 struct block_device *bdev = bio->bi_bdev; in submit_bio_noacct()
838 if (!bdev_is_zoned(bio->bi_bdev)) in submit_bio_noacct()
[all …]
bio.c:249 bio->bi_bdev = bdev; in bio_init()
305 bio->bi_bdev = bdev; in bio_reset()
306 if (bio->bi_bdev) in bio_reset()
783 bio->bi_bdev = NULL; in bio_put_percpu_cache()
832 if (bio->bi_bdev) { in __bio_clone()
833 if (bio->bi_bdev == bio_src->bi_bdev && in __bio_clone()
1092 if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) in bio_add_zone_append_page()
1324 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) in __bio_iov_iter_get_pages()
1342 if (bio->bi_bdev) { in __bio_iov_iter_get_pages()
1449 bio->bi_bdev->bd_disk->lockdep_map); in submit_bio_wait()
[all …]
bio-integrity.c:167 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_integrity_add_page()
309 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_integrity_map_user()
386 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in bio_integrity_prep()
495 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in __bio_integrity_endio()
521 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in bio_integrity_advance()
537 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in bio_integrity_trim()
blk-zoned.c:707 struct gendisk *disk = bio->bi_bdev->bd_disk; in blk_zone_wplug_handle_reset_or_finish()
735 struct gendisk *disk = bio->bi_bdev->bd_disk; in blk_zone_wplug_handle_reset_all()
765 percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter); in blk_zone_wplug_add_bio()
815 zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk, in blk_zone_write_plug_bio_merged()
897 struct gendisk *disk = bio->bi_bdev->bd_disk; in blk_zone_wplug_prepare_bio()
946 struct gendisk *disk = bio->bi_bdev->bd_disk; in blk_zone_wplug_handle_write()
1030 struct block_device *bdev = bio->bi_bdev; in blk_zone_plug_bio()
1144 struct gendisk *disk = bio->bi_bdev->bd_disk; in blk_zone_write_plug_bio_endio()
1182 if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) in blk_zone_write_plug_bio_endio()
1242 bdev = bio->bi_bdev; in blk_zone_wplug_bio_work()
blk-rq-qos.h:139 if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || in rq_qos_done_bio()
141 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in rq_qos_done_bio()
t10-pi.c:377 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in blk_integrity_generate()
383 iter.disk_name = bio->bi_bdev->bd_disk->disk_name; in blk_integrity_generate()
409 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in blk_integrity_verify()
419 iter.disk_name = bio->bi_bdev->bd_disk->disk_name; in blk_integrity_verify()
blk-throttle.h:170 if (!blk_throtl_activated(bio->bi_bdev->bd_queue)) in blk_should_throtl()
bounce.c:167 bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src), in bounce_clone_bio()
blk-crypto.c:293 if (blk_crypto_config_supported_natively(bio->bi_bdev, in __blk_crypto_bio_prep()
/linux/drivers/md/bcache/
request.c:702 bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO); in do_bio_hook()
847 bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ); in CLOSURE_CALLBACK()
929 cache_bio = bio_alloc_bioset(miss->bi_bdev, in cached_dev_cache_miss()
1040 flush = bio_alloc_bioset(bio->bi_bdev, 0, in cached_dev_write()
1053 s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, in cached_dev_write()
1187 struct block_device *orig_bdev = bio->bi_bdev; in cached_dev_submit_bio()
1296 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; in flash_dev_submit_bio()
1304 s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); in flash_dev_submit_bio()
/linux/fs/xfs/
xfs_bio_io.c:41 bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left), in xfs_rw_bdev()
/linux/include/linux/
bio.h:452 disk_devt((bio)->bi_bdev->bd_disk)
476 if (bio->bi_bdev != bdev) in bio_set_dev()
478 bio->bi_bdev = bdev; in bio_set_dev()
blkdev.h:889 return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); in bio_zone_no()
896 disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1); in bio_straddles_zones()
1369 return bdev_offset_from_zone_start(bio->bi_bdev, in bio_offset_from_zone_start()
1540 return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev); in bio_end_io_acct()
blk_types.h:216 struct block_device *bi_bdev; member
/linux/kernel/trace/
blktrace.c:910 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0); in blk_add_trace_bio_bounce()
922 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE, in blk_add_trace_bio_backmerge()
928 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE, in blk_add_trace_bio_frontmerge()
934 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0); in blk_add_trace_bio_queue()
939 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0); in blk_add_trace_getrq()
976 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in blk_add_trace_split()
1005 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in blk_add_trace_bio_remap()
/linux/drivers/nvdimm/
nd_virtio.c:117 struct bio *child = bio_alloc(bio->bi_bdev, 0, in async_pmem_flush()
/linux/drivers/block/
n64cart.c:91 struct device *dev = bio->bi_bdev->bd_disk->private_data; in n64cart_submit_bio()
/linux/arch/m68k/emu/
nfblock.c:62 struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data; in nfhd_submit_bio()
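
Note: the hits in block/bio.c (bio_init(), bio_reset()) and include/linux/bio.h (bio_set_dev()) above are the places where bi_bdev is actually assigned; everything else reads it to reach the gendisk, request queue, or partition. As a minimal, hypothetical sketch of how a caller typically ends up populating bi_bdev (not taken from any of the files above, and assuming the modern bio_alloc(bdev, nr_vecs, opf, gfp) signature shown in the xfs_bio_io.c and nd_virtio.c hits):

	/*
	 * Hypothetical example, not part of the results above.
	 * bio_alloc() stores the target block_device in bio->bi_bdev via
	 * bio_init(); submit_bio_wait() then dispatches the I/O through
	 * bio->bi_bdev->bd_disk->queue. Remapping drivers (dm, md) instead
	 * re-point an existing bio with bio_set_dev().
	 */
	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static int example_read_sector(struct block_device *bdev, sector_t sector,
				       struct page *page)
	{
		struct bio *bio;
		int ret;

		bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO); /* sets bio->bi_bdev = bdev */
		bio->bi_iter.bi_sector = sector;
		__bio_add_page(bio, page, SECTOR_SIZE, 0);

		ret = submit_bio_wait(bio);	/* routed via bio->bi_bdev->bd_disk */
		bio_put(bio);
		return ret;
	}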
