Searched refs:bdev_get_queue (Results 1 – 25 of 31) sorted by relevance
1163 return queue_max_segments(bdev_get_queue(bdev)); in bdev_max_segments()
1198 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min()
1208 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt()
1244 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1254 return blk_queue_nonrot(bdev_get_queue(bdev)); in bdev_nonrot()
1260 &bdev_get_queue(bdev)->queue_flags); in bdev_synchronous()
1266 &bdev_get_queue(bdev)->queue_flags); in bdev_stable_writes()
1286 return blk_queue_zoned_model(bdev_get_queue(bdev)); in bdev_zoned_model()
1291 return blk_queue_is_zoned(bdev_get_queue(bdev)); in bdev_is_zoned()
1310 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
[all …]
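This first group of hits is the family of inline accessors that wrap bdev_get_queue() to expose a device's queue limits and flags. As a minimal sketch (not from the matched source; the helper name check_bdev_limits is illustrative), a caller might combine them like this:

	#include <linux/blkdev.h>

	/* Illustrative only: dump a block device's limits through the
	 * accessors matched above. Assumes bdev is a valid, opened device. */
	static void check_bdev_limits(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		pr_info("max segments:   %u\n", (unsigned int)queue_max_segments(q));
		pr_info("minimum I/O:    %u\n", queue_io_min(q));
		pr_info("optimal I/O:    %u\n", queue_io_opt(q));
		pr_info("non-rotational: %d\n", blk_queue_nonrot(q) ? 1 : 0);
		pr_info("zoned:          %d\n", blk_queue_is_zoned(q) ? 1 : 0);
	}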
362 return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile, in blk_crypto_config_supported_natively()
417 struct request_queue *q = bdev_get_queue(bdev); in blk_crypto_evict_key()
638 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __submit_bio_noacct()
656 if (q == bdev_get_queue(bio->bi_bdev)) in __submit_bio_noacct()
725 struct request_queue *q = bdev_get_queue(bdev); in submit_bio_noacct()
870 q = bdev_get_queue(bdev); in bio_poll()
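The __submit_bio_noacct() hits show why the queue is looked up twice in the same function: bios spawned while one bio is serviced may have been redirected to a different device, so the loop re-derives each bio's queue and sorts on it. A rough, simplified sketch of that split (not the kernel's actual loop):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Simplified sketch: partition pending bios by whether they still
	 * target the queue currently being processed. */
	static void sort_bios_by_queue(struct request_queue *q,
				       struct bio_list *pending,
				       struct bio_list *same,
				       struct bio_list *other)
	{
		struct bio *bio;

		while ((bio = bio_list_pop(pending)) != NULL) {
			if (bdev_get_queue(bio->bi_bdev) == q)
				bio_list_add(same, bio);
			else
				bio_list_add(other, bio);
		}
	}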
708 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, in disk_stack_limits()
963 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
976 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
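The disk_stack_limits() hit is the canonical limit-stacking call: a stacked driver folds a lower device's queue_limits into the limits it is building with blk_stack_limits(). A hedged sketch of that call shape (the helper name and the log message are illustrative, not from the source):

	#include <linux/blkdev.h>

	/* Sketch: merge a lower device's limits into top-level limits `t`,
	 * mirroring the disk_stack_limits() match above. `start` is the
	 * lower device's offset in sectors. */
	static void stack_lower_limits(struct queue_limits *t,
				       struct block_device *bdev,
				       sector_t start)
	{
		if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
				     start) < 0)
			pr_notice("%pg: device is misaligned\n", bdev);
	}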
141 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in rq_qos_done_bio()
1081 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_add_zone_append_page()
1183 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_bvec_set()
1215 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_add_zone_append_page()
1586 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); in bio_endio()
134 bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits, in bio_integrity_add_page()
837 spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock); in blkg_conf_finish()
1976 blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg); in bio_associate_blkg_from_css()
1977 bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg; in bio_associate_blkg_from_css()
67 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_queue_enter()
259 struct request_queue *q = bdev_get_queue(bdev); in blkdev_zone_mgmt()
975 struct request_queue *q = bdev_get_queue(bdev); in part_stat_show()
1024 struct request_queue *q = bdev_get_queue(bdev); in part_inflight_show()
516 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_common_ioctl()
403 const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits; in bio_split_to_limits()
401 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits()
848 struct request_queue *q = bdev_get_queue(bdev); in device_is_rq_stackable()
1258 bdev_get_queue(dev->bdev)->crypto_profile; in device_intersect_crypto_capabilities()
1491 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_poll_capable()
1582 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_zoned_model()
1771 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable()
1824 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random()
1832 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_zeroes_capable()
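All of these device-mapper table hits share the iterate_devices callback shape: each helper receives a dm_dev, grabs its queue with bdev_get_queue(), and reports a single capability so the table code can combine the results across every underlying device. A sketch of one such predicate, reconstructed from the device_not_poll_capable() match (details may differ from the source):

	#include <linux/blkdev.h>
	#include <linux/device-mapper.h>

	/* Reconstructed sketch of a dm iterate_devices predicate: nonzero
	 * means the underlying queue does not support bio polling. */
	static int device_not_poll_capable(struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
	}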
539 q = bdev_get_queue(bdev); in multipath_clone_and_map()
885 struct request_queue *q = bdev_get_queue(bdev); in setup_scsi_dh()
961 q = bdev_get_queue(p->path.dev->bdev); in parse_path()
1626 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
2100 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
316 struct request_queue *q = bdev_get_queue(where->bdev); in do_region()
2027 struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits; in disable_passdown_if_not_supported()
2048 struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits; in set_discard_limits()
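Both hits read the destination device's queue_limits directly to decide whether discards can be passed down to the lower device rather than handled by the target itself. A minimal sketch of that gate (the helper name passdown_supported is illustrative):

	#include <linux/blkdev.h>

	/* Illustrative gate matching the disable_passdown_if_not_supported()
	 * pattern: passdown only makes sense if the lower device advertises
	 * a nonzero discard capability in its limits. */
	static bool passdown_supported(struct block_device *bdev)
	{
		struct queue_limits *limits = &bdev_get_queue(bdev)->limits;

		return limits->max_discard_sectors != 0;
	}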
589 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { in dmz_bdev_is_dying()
3371 struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; in disable_passdown_if_not_supported()
3393 struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; in set_discard_limits()
732 struct request_queue *q = bdev_get_queue(bdev); in blk_trace_ioctl()
1768 struct request_queue *q = bdev_get_queue(bdev); in sysfs_blk_trace_attr_show()
1802 struct request_queue *q = bdev_get_queue(bdev); in sysfs_blk_trace_attr_store()
548 cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev))); in rnbd_srv_fill_msg_open_rsp()
124 q = bdev_get_queue(bd); in iblock_configure_device()
686 struct request_queue *q = bdev_get_queue(pd->bdev); in pkt_generic_packet()
2155 q = bdev_get_queue(pd->bdev); in pkt_open_dev()
772 struct request_queue *backingq = bdev_get_queue(I_BDEV(inode)); in loop_config_discard()
1017 q = bdev_get_queue(dc->bdev); in cached_dev_status_update()
1399 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_init()
Completed in 70 milliseconds