| /linux/block/ |
| blk-merge.c |
- 632: `if (blk_rq_sectors(req) + bio_sectors(bio) >` in `ll_back_merge_fn()`
- 651: `if (blk_rq_sectors(req) + bio_sectors(bio) >` in `ll_front_merge_fn()`
- 667: `if (blk_rq_sectors(req) + bio_sectors(next->bio) >` in `req_attempt_discard_merge()`
- 689: `if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >` in `ll_merge_requests_fn()`
- 752: `else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))` in `blk_try_req_merge()`
- 940: `else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)` in `blk_try_merge()`
- 1020: `if (blk_rq_sectors(req) + bio_sectors(bio) >` in `bio_attempt_discard_merge()`
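The merge-time checks above share one pattern: the incoming bio (or the second request) has to begin exactly where the request ends, and the combined size in 512-byte sectors must stay within the queue's per-request limit. The following userspace sketch illustrates that logic; `struct req_sketch`, `struct bio_sketch` and the `max_sectors` parameter are simplified stand-ins for the kernel's `struct request`, `struct bio` and queue limits, not the real types.

```c
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's request/bio bookkeeping. */
struct req_sketch {
	uint64_t pos;       /* first 512-byte sector of the request */
	uint32_t sectors;   /* current length in 512-byte sectors   */
};

struct bio_sketch {
	uint64_t sector;    /* first 512-byte sector of the bio */
	uint32_t sectors;   /* bio length in 512-byte sectors   */
};

/*
 * Back-merge test in the spirit of blk_try_merge()/ll_back_merge_fn():
 * the bio must start at the request's end sector, and the merged size
 * must not exceed the queue's per-request sector limit.
 */
bool can_back_merge(const struct req_sketch *rq, const struct bio_sketch *bio,
		    uint32_t max_sectors)
{
	if (rq->pos + rq->sectors != bio->sector)
		return false;              /* not contiguous on disk          */
	if (rq->sectors + bio->sectors > max_sectors)
		return false;              /* merged request would be too big */
	return true;
}
```

A front merge is the mirror image of this: the bio must end exactly at `blk_rq_pos()` of the request, with the same combined-size limit.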
|
| bfq-iosched.c |
- 236: `blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))`
- 963: `return blk_rq_sectors(rq);` in `bfq_serv_to_charge()`
- 965: `return blk_rq_sectors(rq) * bfq_async_charge_factor;` in `bfq_serv_to_charge()`
- 3260: `blk_rq_sectors(rq);` in `bfq_reset_rate_computation()`
- 3446: `bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);` in `bfq_update_peak_rate()`
- 3451: `max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);` in `bfq_update_peak_rate()`
- 3453: `bfqd->last_rq_max_size = blk_rq_sectors(rq);` in `bfq_update_peak_rate()`
- 3464: `bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);` in `bfq_update_peak_rate()`
- 3939: `next_rq ? blk_rq_sectors(next_rq) : 0,` in `__bfq_bfqq_recalc_budget()`
- 4549: `blk_rq_sectors(bfqq->next_rq) >=` in `bfq_choose_bfqq_for_injection()`
- [all …]
|
| blk-core.c |
- 245: `blk_rq_sectors(rq), blk_rq_cur_sectors(rq));` in `blk_dump_rq_flags()`
- 1115: `if (blk_rq_sectors(rq) > max_sectors) {` in `blk_cloned_rq_check_limits()`
- 1130: `__func__, blk_rq_sectors(rq), max_sectors);` in `blk_cloned_rq_check_limits()`
|
| blk-flush.c |
- 102: `if (blk_rq_sectors(rq))` in `blk_flush_policy()`
|
| elevator.c |
- 54: `#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))`
|
| /linux/include/linux/ |
| blktrace_api.h |
- 135: `return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);` in `blk_rq_trace_nr_sectors()`
|
| blk-mq.h |
- 976: `static inline unsigned int blk_rq_sectors(const struct request *rq)` (definition of `blk_rq_sectors()`)
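blk-mq.h is where the helper itself lives. As far as I know it is just the request's remaining byte count shifted down to 512-byte sectors (`blk_rq_bytes(rq) >> SECTOR_SHIFT` in current trees); the sketch below shows that assumed shape with a hypothetical stand-in struct rather than the kernel's `struct request`.

```c
#define SECTOR_SHIFT	9	/* the block layer's 512-byte unit */

/* Hypothetical stand-in carrying only the field the sketch needs. */
struct request_sketch {
	unsigned int data_len;	/* remaining bytes to transfer, i.e. what blk_rq_bytes() reports */
};

/* Assumed shape of blk_rq_sectors(): remaining length in 512-byte sectors. */
static inline unsigned int rq_sectors_sketch(const struct request_sketch *rq)
{
	return rq->data_len >> SECTOR_SHIFT;
}
```

Note the unit: a 64 KiB request reports 128 sectors regardless of the device's logical block size, which is why drivers such as sd.c and the s390 DASD code rescale the value before talking to the hardware.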
|
| /linux/drivers/scsi/ |
| sd.c |
- 877: `u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));` in `sd_setup_unmap_cmnd()`
- 913: `u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));` in `sd_setup_write_same16_cmnd()`
- 945: `u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));` in `sd_setup_write_same10_cmnd()`
- 976: `u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));` in `sd_setup_write_zeroes_cmnd()`
- 1074: `u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));` in `sd_setup_write_same_cmnd()`
- 1221: `unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));` in `sd_setup_read_write_cmnd()`
- 1239: `if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {` in `sd_setup_read_write_cmnd()`
- 1244: `if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {` in `sd_setup_read_write_cmnd()`
- 1312: `blk_rq_sectors(rq)));` in `sd_setup_read_write_cmnd()`
- 1317: `blk_rq_sectors(rq)));` in `sd_setup_read_write_cmnd()`
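sd.c never puts the raw 512-byte sector count on the wire: `sectors_to_logical()` rescales it to the drive's logical block size, and the check at line 1244 masks both the start and the length so that only whole logical blocks are issued. Below is a hedged sketch of that arithmetic, assuming a power-of-two logical block size of at least 512 bytes; `struct scsi_disk_sketch` and the helper names are illustrative, not the kernel's.

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative device descriptor: logical block size in bytes (power of two, >= 512). */
struct scsi_disk_sketch {
	unsigned int logical_block_size;
};

/* 512-byte sectors -> device logical blocks, e.g. 8 sectors -> 1 block on a 4096-byte disk. */
uint32_t sectors_to_logical_sketch(const struct scsi_disk_sketch *sdp, uint64_t sectors)
{
	unsigned int shift = __builtin_ctz(sdp->logical_block_size) - 9; /* GCC/Clang builtin */

	return (uint32_t)(sectors >> shift);
}

/* Reject I/O that does not start and end on a logical-block boundary. */
bool is_block_aligned(const struct scsi_disk_sketch *sdp,
		      uint64_t pos_sectors, uint32_t nr_sectors)
{
	uint64_t mask = (sdp->logical_block_size >> 9) - 1;

	return !((pos_sectors & mask) || (nr_sectors & mask));
}
```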
|
| sr.c |
- 412: `"Finishing %u sectors\n", blk_rq_sectors(rq)));` in `sr_init_command()`
- 479: `this_count, blk_rq_sectors(rq)));` in `sr_init_command()`
|
| virtio_scsi.c |
- 536: `blk_rq_sectors(rq)));` in `virtio_scsi_init_hdr_pi()`
- 540: `blk_rq_sectors(rq)));` in `virtio_scsi_init_hdr_pi()`
|
| /linux/drivers/s390/block/ |
| dasd_fba.c |
- 340: `(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;` in `dasd_fba_build_cp_discard()`
- 457: `(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;` in `dasd_fba_build_cp_regular()`
- 493: `block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));` in `dasd_fba_build_cp_regular()`
|
| dasd_diag.c |
- 532: `(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;` in `dasd_diag_build_cp()`
|
| dasd_eckd.c |
- 3159: `(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;` in `dasd_eckd_ese_format()`
- 3242: `(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;` in `dasd_eckd_ese_read()`
- 4610: `(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;` in `dasd_eckd_build_cp()`
- 4690: `end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %` in `dasd_eckd_build_cp_raw()`
- 4704: `last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /` in `dasd_eckd_build_cp_raw()`
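All three DASD variants use the same conversion: `s2b_shift` rescales 512-byte sectors to device blocks, and the `- 1` makes the shift land on the last sector actually covered rather than on the first sector past the end. A small worked sketch of that range computation (names simplified from the snippets above):

```c
#include <stdint.h>

/*
 * pos and sectors are in 512-byte units; s2b_shift converts sectors to
 * device blocks (e.g. s2b_shift == 3 for a 4096-byte block size).
 *
 * Example: pos = 8, sectors = 8, s2b_shift = 3
 *   first_blk = 8 >> 3         = 1
 *   last_blk  = (8+8-1) >> 3   = 1   (not 2: the request ends inside block 1)
 */
void request_block_range(uint64_t pos, uint32_t sectors, unsigned int s2b_shift,
			 uint64_t *first_blk, uint64_t *last_blk)
{
	*first_blk = pos >> s2b_shift;
	*last_blk  = (pos + sectors - 1) >> s2b_shift;
}
```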
|
| /linux/drivers/block/ |
| ps3disk.c |
- 113: `__func__, __LINE__, op, n, blk_rq_sectors(req));` in `ps3disk_submit_request_sg()`
- 117: `sectors = blk_rq_sectors(req) * priv->blocking_factor;` in `ps3disk_submit_request_sg()`
|
| floppy.c |
- 2439: `blk_rq_sectors(current_req));` in `copy_buffer()`
- 2442: `buffer_max > fsector_t + blk_rq_sectors(current_req))` in `copy_buffer()`
- 2444: `blk_rq_sectors(current_req));` in `copy_buffer()`
- 2452: `blk_rq_sectors(current_req));` in `copy_buffer()`
- 2593: `blk_rq_sectors(current_req));` in `make_raw_rw_request()`
- 2645: `max_size = blk_rq_sectors(current_req);` in `make_raw_rw_request()`
- 2654: `} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {` in `make_raw_rw_request()`
- 2658: `sectors = fsector_t + blk_rq_sectors(current_req);` in `make_raw_rw_request()`
- 2678: `(!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&` in `make_raw_rw_request()`
|
| virtio_blk.c |
- 154: `range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));` in `virtblk_setup_discard_write_zeroes()`
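For discard and write-zeroes the sector count is not a data length but part of a little-endian range descriptor handed to the device. The sketch below mirrors that packing with a hypothetical descriptor layout; the authoritative field layout of the virtio-blk discard/write-zeroes descriptor is in the virtio specification, and `htole32()`/`htole64()` from glibc's `<endian.h>` stand in for the kernel's `cpu_to_le*()` helpers.

```c
#include <endian.h>
#include <stdint.h>

/* Hypothetical range descriptor: start sector, length in sectors, flags (all little-endian). */
struct discard_range_sketch {
	uint64_t sector;
	uint32_t num_sectors;
	uint32_t flags;
};

void fill_discard_range(struct discard_range_sketch *range,
			uint64_t rq_pos, uint32_t rq_sectors)
{
	range->sector      = htole64(rq_pos);
	range->num_sectors = htole32(rq_sectors);  /* mirrors cpu_to_le32(blk_rq_sectors(req)) */
	range->flags       = 0;
}
```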
|
| ataflop.c |
- 1511: `blk_rq_sectors(bd->rq), bd->last));` in `ataflop_queue_rq()`
|
| /linux/drivers/mmc/core/ |
| block.c |
- 1107: `nr = blk_rq_sectors(req);` in `mmc_blk_issue_discard_rq()`
- 1145: `nr = blk_rq_sectors(req);` in `mmc_blk_issue_secdiscard_rq()`
- 1324: `brq->data.blocks = blk_rq_sectors(req);` in `mmc_blk_data_prep()`
- 1349: `(blk_rq_pos(req) + blk_rq_sectors(req) ==` in `mmc_blk_data_prep()`
- 1399: `if (brq->data.blocks != blk_rq_sectors(req)) {` in `mmc_blk_data_prep()`
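In the MMC path the sector count does double duty: it becomes the transfer length in 512-byte blocks (`brq->data.blocks`), and at line 1349 `blk_rq_pos(req) + blk_rq_sectors(req)` is compared against a boundary, such as the end of the device, to see whether the request runs right up to it. A hedged sketch of those two uses with invented names; it illustrates the pattern rather than restating `mmc_blk_data_prep()`.

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-request bookkeeping, loosely modeled on the snippets above. */
struct mmc_brq_sketch {
	uint32_t blocks;       /* transfer length in 512-byte blocks   */
	bool     reaches_end;  /* request ends exactly at the boundary */
};

void mmc_data_prep_sketch(struct mmc_brq_sketch *brq,
			  uint64_t rq_pos, uint32_t rq_sectors,
			  uint64_t boundary_sectors)
{
	/* brq->data.blocks = blk_rq_sectors(req): one block per 512-byte sector. */
	brq->blocks = rq_sectors;

	/* blk_rq_pos(req) + blk_rq_sectors(req) == boundary: does the I/O end right at it? */
	brq->reaches_end = (rq_pos + rq_sectors == boundary_sectors);
}
```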
|
| /linux/include/trace/events/ |
| block.h |
- 517: `__entry->nr_sector = blk_rq_sectors(rq);`
|
| /linux/drivers/md/ |
| dm-rq.c |
- 448: `tio->n_sectors = blk_rq_sectors(orig);` in `dm_start_request()`
|
| /linux/drivers/cdrom/ |
| gdrom.c |
- 585: `block_cnt = blk_rq_sectors(req)/GD_TO_BLK;` in `gdrom_readdisk_dma()`
|
| /linux/drivers/block/paride/ |
| pd.c |
- 497: `pd_run = blk_rq_sectors(pd_req);` in `do_pd_io_start()`
|
| pf.c |
- 751: `pf_run = blk_rq_sectors(pf_req);` in `pf_request()`
|
| /linux/drivers/block/null_blk/ |
| main.c |
- 1576: `blk_rq_sectors(req));` in `null_poll()`
- 1617: `sector_t nr_sectors = blk_rq_sectors(bd->rq);` in `null_queue_rq()`
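null_queue_rq() shows the pattern most simple drivers follow in their queue_rq handler: read the starting sector with blk_rq_pos() and the length with blk_rq_sectors() before doing anything with the data. Below is a stripped-down userspace model of that entry point; the types and the return convention are invented for the sketch.

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the parts of a request a trivial driver looks at. */
struct rq_sketch {
	uint64_t pos;       /* blk_rq_pos():     first 512-byte sector      */
	uint32_t sectors;   /* blk_rq_sectors(): length in 512-byte sectors */
	int      is_write;
};

/* Shape of a queue_rq-style handler: decode the request, then "do" the I/O. */
int queue_rq_sketch(const struct rq_sketch *rq)
{
	uint64_t sector     = rq->pos;
	uint32_t nr_sectors = rq->sectors;  /* cf. sector_t nr_sectors = blk_rq_sectors(bd->rq); */

	printf("%s %u sectors at %llu\n",
	       rq->is_write ? "write" : "read",
	       nr_sectors, (unsigned long long)sector);

	return 0;  /* a real handler returns a block-layer status code */
}
```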
|
| /linux/drivers/block/mtip32xx/ |
| mtip32xx.c |
- 2067: `unsigned int nsect = blk_rq_sectors(rq);` in `mtip_hw_submit_io()`
- 3396: `if (blk_rq_sectors(rq) <= 64) {` in `mtip_check_unal_depth()`
- 3397: `if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))` in `mtip_check_unal_depth()`
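mtip_check_unal_depth() classifies a request as unaligned when it is small (64 sectors or less) and either its start or its length is not a multiple of 8 sectors, i.e. not on a 4 KiB boundary; the function name suggests such requests get their own queue-depth accounting. A short sketch of just that classification:

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Unaligned-I/O test modeled on mtip_check_unal_depth(): small requests whose
 * start sector or sector count is not a multiple of 8 (8 * 512 B = 4 KiB).
 */
bool is_unaligned_small_io(uint64_t pos_sectors, uint32_t nr_sectors)
{
	if (nr_sectors > 64)
		return false;		/* large I/O takes the normal path */

	return (pos_sectors & 7) || (nr_sectors & 7);
}
```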
|