/linux/block/

blk-merge.c
     661  if (blk_rq_sectors(req) + bio_sectors(bio) >                              in ll_back_merge_fn()
     680  if (blk_rq_sectors(req) + bio_sectors(bio) >                              in ll_front_merge_fn()
     696  if (blk_rq_sectors(req) + bio_sectors(next->bio) >                        in req_attempt_discard_merge()
     718  if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >                        in ll_merge_requests_fn()
     814  else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))       in blk_try_req_merge()
     999  else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)   in blk_try_merge()
    1088  if (blk_rq_sectors(req) + bio_sectors(bio) >                              in bio_attempt_discard_merge()
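The hits above are the request-merge fast path: a bio (or a second request) is folded into req only if the combined sector count stays under the queue limit, and back merging additionally requires that req end exactly where the new I/O begins. A minimal sketch of that arithmetic, using the real accessors but a made-up helper name (can_back_merge() and the max_sectors parameter are illustrative, not the kernel functions):

    #include <linux/blk-mq.h>
    #include <linux/bio.h>

    /*
     * Illustrative sketch only: mirrors the size and contiguity checks
     * listed above. blk_rq_pos(), blk_rq_sectors() and bio_sectors()
     * are the real accessors; everything else is assumed for the example.
     */
    static bool can_back_merge(struct request *req, struct bio *bio,
                               unsigned int max_sectors)
    {
            /* the merged request must not exceed the queue's size limit */
            if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors)
                    return false;

            /* req must end exactly where the bio starts */
            return blk_rq_pos(req) + blk_rq_sectors(req) == bio->bi_iter.bi_sector;
    }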
|
bfq-iosched.c
     235  blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
    1048  return blk_rq_sectors(rq);                                        in bfq_serv_to_charge()
    1050  return blk_rq_sectors(rq) * bfq_async_charge_factor;              in bfq_serv_to_charge()
    3442  blk_rq_sectors(rq);                                               in bfq_reset_rate_computation()
    3628  bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);               in bfq_update_peak_rate()
    3633  max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);           in bfq_update_peak_rate()
    3635  bfqd->last_rq_max_size = blk_rq_sectors(rq);                      in bfq_update_peak_rate()
    3646  bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);        in bfq_update_peak_rate()
    4119  next_rq ? blk_rq_sectors(next_rq) : 0,                            in __bfq_bfqq_recalc_budget()
    4731  blk_rq_sectors(bfqq->next_rq) >=                                  in bfq_choose_bfqq_for_injection()
    [all …]
|
blk-flush.c
     392  if (blk_rq_sectors(rq))                                           in blk_insert_flush()
|
elevator.c
      54  #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
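The rq_hash_key() macro is why the elevator cares about blk_rq_sectors(): requests are hashed by their end sector, so a newly arriving bio can be matched against a request that finishes exactly where the bio starts and be back-merged into it. A hedged illustration of that lookup idea, assuming the bucket has already been selected (elv_find_back_merge() is a hypothetical name, not the kernel API):

    #include <linux/blk-mq.h>
    #include <linux/bio.h>

    /* end sector of the request, as in elevator.c line 54 above */
    #define rq_hash_key(rq)  (blk_rq_pos(rq) + blk_rq_sectors(rq))

    /*
     * Hypothetical sketch: walk one hash bucket looking for a request whose
     * end sector equals the bio's start sector, i.e. a back-merge candidate.
     */
    static struct request *elv_find_back_merge(struct hlist_head *bucket,
                                               struct bio *bio)
    {
            struct request *rq;

            hlist_for_each_entry(rq, bucket, hash)
                    if (rq_hash_key(rq) == bio->bi_iter.bi_sector)
                            return rq;
            return NULL;
    }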
|
blk-mq.c
     759  blk_rq_sectors(rq), blk_rq_cur_sectors(rq));                      in blk_dump_rq_flags()
    1250  rq->stats_sectors = blk_rq_sectors(rq);                           in blk_mq_start_request()
    3066  if (blk_rq_sectors(rq) > max_sectors) {                           in blk_insert_cloned_request()
    3081  __func__, blk_rq_sectors(rq), max_sectors);                       in blk_insert_cloned_request()
|
blk-zoned.c
     832  sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);  in blk_zone_write_plug_init_request()
|
/linux/include/linux/

blktrace_api.h
     128  return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);        in blk_rq_trace_nr_sectors()
|
blk-mq.h
    1081  static inline unsigned int blk_rq_sectors(const struct request *rq)  in blk_rq_sectors() function
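Line 1081 above is the definition site in include/linux/blk-mq.h. In current trees the helper simply converts the request's data length from bytes to 512-byte sector units; a sketch of the usual body (an assumption about this tree, not quoted from the listing):

    /*
     * Sketch of the accessor referenced above. blk_rq_bytes() is the total
     * data length of the request; SECTOR_SHIFT is 9, so the result is always
     * in 512-byte units regardless of the device's logical block size.
     */
    static inline unsigned int blk_rq_sectors(const struct request *rq)
    {
            return blk_rq_bytes(rq) >> SECTOR_SHIFT;
    }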
|
/linux/drivers/s390/block/

dasd_fba.c
     336  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_fba_build_cp_discard()
     453  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_fba_build_cp_regular()
     489  block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));           in dasd_fba_build_cp_regular()
|
dasd_diag.c
     529  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_diag_build_cp()
|
dasd_eckd.c
    3198  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_eckd_ese_format()
    3284  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_eckd_ese_read()
    4673  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_eckd_build_cp()
    4752  end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %     in dasd_eckd_build_cp_raw()
    4766  last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /          in dasd_eckd_build_cp_raw()
|
/linux/drivers/block/

ps3disk.c
     113  __func__, __LINE__, op, n, blk_rq_sectors(req));                  in ps3disk_submit_request_sg()
     117  sectors = blk_rq_sectors(req) * priv->blocking_factor;            in ps3disk_submit_request_sg()
|
floppy.c
    2440  blk_rq_sectors(current_req));                                     in copy_buffer()
    2443  buffer_max > fsector_t + blk_rq_sectors(current_req))             in copy_buffer()
    2445  blk_rq_sectors(current_req));                                     in copy_buffer()
    2453  blk_rq_sectors(current_req));                                     in copy_buffer()
    2592  blk_rq_sectors(current_req));                                     in make_raw_rw_request()
    2644  max_size = blk_rq_sectors(current_req);                           in make_raw_rw_request()
    2653  } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {  in make_raw_rw_request()
    2657  sectors = fsector_t + blk_rq_sectors(current_req);                in make_raw_rw_request()
    2677  (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&   in make_raw_rw_request()
|
ublk_drv.c
     440  iod->nr_sectors = blk_rq_sectors(req);                            in ublk_setup_iod_zoned()
     972  iod->nr_sectors = blk_rq_sectors(req);                            in ublk_setup_iod()
|
ataflop.c
    1512  blk_rq_sectors(bd->rq), bd->last));                               in ataflop_queue_rq()
|
virtio_blk.c
     184  range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));          in virtblk_setup_discard_write_zeroes_erase()
|
/linux/drivers/scsi/

sd.c
     919  u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));      in sd_setup_unmap_cmnd()
    1008  u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));      in sd_setup_write_same16_cmnd()
    1035  u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));      in sd_setup_write_same10_cmnd()
    1061  u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));      in sd_setup_write_zeroes_cmnd()
    1326  unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));  in sd_setup_read_write_cmnd()
    1345  if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {  in sd_setup_read_write_cmnd()
    1350  if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {     in sd_setup_read_write_cmnd()
    1417  blk_rq_sectors(rq)));                                             in sd_setup_read_write_cmnd()
    1422  blk_rq_sectors(rq)));                                             in sd_setup_read_write_cmnd()
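Note that sd.c never puts the raw 512-byte sector count into a CDB: every call site above first converts it with sectors_to_logical(), because SCSI commands are expressed in the device's logical block size, which may be 4096 bytes rather than 512, and line 1345 additionally rejects requests that would run past the disk's capacity. A hedged sketch of the conversion, assuming the usual sd.h-style definition:

    #include <scsi/scsi_device.h>
    #include <linux/log2.h>

    /*
     * Assumed shape of the helper: shift away the difference between the
     * 512-byte block-layer sector and the device's logical block size.
     */
    static inline unsigned int sectors_to_logical(struct scsi_device *sdev,
                                                  unsigned int sectors)
    {
            return sectors >> (ilog2(sdev->sector_size) - 9);
    }

For example, a 256 KiB request is 512 sectors, which becomes 64 logical blocks on a 4096-byte-sector disk and stays 512 blocks on a 512-byte-sector disk.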
|
sr.c
     376  "Finishing %u sectors\n", blk_rq_sectors(rq)));                   in sr_init_command()
     443  this_count, blk_rq_sectors(rq)));                                 in sr_init_command()
|
virtio_scsi.c
     548  blk_rq_sectors(rq)));                                             in virtio_scsi_init_hdr_pi()
     552  blk_rq_sectors(rq)));                                             in virtio_scsi_init_hdr_pi()
|
/linux/drivers/md/

dm-rq.c
     441  tio->n_sectors = blk_rq_sectors(orig);                            in dm_start_request()
|
/linux/drivers/mmc/core/

block.c
    1212  nr = blk_rq_sectors(req);                                         in mmc_blk_issue_erase_rq()
    1267  nr = blk_rq_sectors(req);                                         in mmc_blk_issue_secdiscard_rq()
    1446  brq->data.blocks = blk_rq_sectors(req);                           in mmc_blk_data_prep()
    1471  (blk_rq_pos(req) + blk_rq_sectors(req) ==                         in mmc_blk_data_prep()
    1521  if (brq->data.blocks != blk_rq_sectors(req)) {                    in mmc_blk_data_prep()
|
/linux/include/trace/events/

block.h
     591  __entry->nr_sector = blk_rq_sectors(rq);
|
/linux/drivers/cdrom/

gdrom.c
     584  block_cnt = blk_rq_sectors(req)/GD_TO_BLK;                        in gdrom_readdisk_dma()
|
/linux/drivers/block/null_blk/

main.c
    1543  blk_rq_sectors(req));                                             in null_poll()
    1592  sector_t nr_sectors = blk_rq_sectors(rq);                         in null_queue_rq()
|
/linux/drivers/block/mtip32xx/

mtip32xx.c
    2055  unsigned int nsect = blk_rq_sectors(rq);                          in mtip_hw_submit_io()
    3247  if (blk_rq_sectors(rq) <= 64) {                                   in mtip_check_unal_depth()
    3248  if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))             in mtip_check_unal_depth()
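The test at lines 3247-3248 diverts small or unaligned requests to a separate, shallower tag depth: masking the start position and the sector count with 7 checks whether both are multiples of 8 sectors, i.e. whether the request is 4 KiB aligned and a whole number of 4 KiB units long. A self-contained illustration of that check (plain C, names invented for the example):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * 8 sectors x 512 bytes = 4 KiB, so "& 7" on the starting sector or on
     * the sector count exposes anything that is not 4 KiB aligned or sized.
     */
    static bool is_4k_aligned(uint64_t start_sector, uint32_t nr_sectors)
    {
            return !(start_sector & 7) && !(nr_sectors & 7);
    }

For instance, is_4k_aligned(8, 16) is true, while is_4k_aligned(9, 16) and is_4k_aligned(8, 12) are not.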
|