Home
last modified time | relevance | path

Searched refs:req_op (Results 1 – 25 of 42) sorted by relevance

1 2

/linux/block/
A D blk-merge.c 576 if (req_op(rq) == REQ_OP_DISCARD) in blk_rq_get_max_segments()
590 req_op(rq) == REQ_OP_DISCARD || in blk_rq_get_max_sectors()
591 req_op(rq) == REQ_OP_SECURE_ERASE) in blk_rq_get_max_sectors()
592 return blk_queue_get_max_sectors(q, req_op(rq)); in blk_rq_get_max_sectors()
595 blk_queue_get_max_sectors(q, req_op(rq))); in blk_rq_get_max_sectors()
605 if (req_op(req) == REQ_OP_DISCARD) in ll_new_hw_segment()
775 if (req_op(req) != req_op(next)) in attempt_merge()
782 if (req_op(req) == REQ_OP_WRITE_SAME && in attempt_merge()
899 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
919 if (req_op(rq) == REQ_OP_WRITE_SAME && in blk_rq_merge_ok()
[all …]
A D blk.h 139 if (req_op(rq) == REQ_OP_FLUSH) in rq_mergeable()
142 if (req_op(rq) == REQ_OP_WRITE_ZEROES) in rq_mergeable()
145 if (req_op(rq) == REQ_OP_ZONE_APPEND) in rq_mergeable()
166 if (req_op(req) == REQ_OP_DISCARD && in blk_discard_mergable()
A D blk-map.c 158 bio->bi_opf |= req_op(rq); in bio_copy_user_iov()
247 bio->bi_opf |= req_op(rq); in bio_map_user_iov()
648 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
A D blk-core.c 231 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)), in blk_print_req_error()
1113 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); in blk_cloned_rq_check_limits()
1238 const int sgrp = op_stat_group(req_op(req)); in __blk_account_io_done()
A D blk-zoned.c 66 switch (req_op(rq)) { in blk_req_needs_zone_write_lock()
/linux/drivers/block/rnbd/
A D rnbd-proto.h 274 switch (req_op(rq)) { in rq_to_rnbd_flags()
295 req_op(rq), (unsigned long long)rq->cmd_flags); in rq_to_rnbd_flags()
/linux/drivers/scsi/
A D sd_zbc.c 418 switch (req_op(rq)) { in sd_zbc_need_zone_wp_update()
448 enum req_opf op = req_op(rq); in sd_zbc_zone_wp_update()
521 if (op_is_zone_mgmt(req_op(rq)) && in sd_zbc_complete()
534 if (req_op(rq) == REQ_OP_ZONE_APPEND) in sd_zbc_complete()
/linux/include/linux/
A D blk-mq.h 203 #define req_op(req) \ macro
208 return blk_op_is_passthrough(req_op(rq)); in blk_rq_is_passthrough()
216 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
219 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
/linux/arch/um/drivers/
A D ubd_kern.c 483 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { in ubd_handler()
1248 if (req_op(req->req) == REQ_OP_READ) { in cowify_req()
1268 int op = req_op(req); in ubd_map_req()
1331 int op = req_op(req); in ubd_submit_request()
1368 switch (req_op(req)) { in ubd_queue_rq()
1486 if (req_op(req->req) == REQ_OP_FLUSH) { in do_io()
1507 switch (req_op(req->req)) { in do_io()
/linux/drivers/crypto/hisilicon/sec2/
A D sec_crypto.c 258 ctx->req_op->buf_unmap(ctx, req); in sec_req_cb()
260 ctx->req_op->callback(ctx, req, err); in sec_req_cb()
1196 ret = ctx->req_op->buf_map(ctx, req); in sec_request_transfer()
1200 ctx->req_op->do_transfer(ctx, req); in sec_request_transfer()
1202 ret = ctx->req_op->bd_fill(ctx, req); in sec_request_transfer()
1209 ctx->req_op->buf_unmap(ctx, req); in sec_request_transfer()
1215 ctx->req_op->buf_unmap(ctx, req); in sec_request_untransfer()
1738 ret = ctx->req_op->bd_send(ctx, req); in sec_process()
1815 ctx->req_op = &sec_skcipher_req_ops; in sec_skcipher_ctx_init()
1848 ctx->req_op = &sec_aead_req_ops; in sec_aead_init()
[all …]
A D sec.h 137 const struct sec_req_op *req_op; member
/linux/drivers/mmc/core/
A D queue.c 46 switch (req_op(req)) { in mmc_cqe_issue_type()
66 if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) in mmc_issue_type()
/linux/drivers/md/
A D dm-rq.c 217 if (req_op(clone) == REQ_OP_DISCARD && in dm_done()
220 else if (req_op(clone) == REQ_OP_WRITE_SAME && in dm_done()
223 else if (req_op(clone) == REQ_OP_WRITE_ZEROES && in dm_done()
/linux/drivers/block/null_blk/
A D trace.h 44 __entry->op = req_op(cmd->rq);
A D main.c 1223 op_is_write(req_op(rq)), sector, in null_handle_rq()
1342 } else if (req_op(cmd->rq) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1575 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), in null_poll()
1657 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); in null_queue_rq()
/linux/drivers/block/
A D xen-blkfront.c 558 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard) in blkif_queue_discard_req()
763 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); in blkif_queue_rw_req()
775 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { in blkif_queue_rw_req()
864 if (unlikely(req_op(req) == REQ_OP_DISCARD || in blkif_queue_request()
865 req_op(req) == REQ_OP_SECURE_ERASE)) in blkif_queue_request()
885 ((req_op(req) == REQ_OP_FLUSH) && in blkif_request_flush_invalid()
2084 if (req_op(shadow[j].request) == REQ_OP_FLUSH || in blkfront_resume()
2085 req_op(shadow[j].request) == REQ_OP_DISCARD || in blkfront_resume()
2086 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || in blkfront_resume()
A D ps3disk.c 168 switch (req_op(req)) { in ps3disk_do_request()
231 if (req_op(req) == REQ_OP_FLUSH) { in ps3disk_interrupt()
A D loop.c 344 req_op(rq) != REQ_OP_READ) { in lo_complete_rq()
476 switch (req_op(rq)) { in do_req_filebacked()
1809 switch (req_op(rq)) { in loop_queue_rq()
1841 const bool write = op_is_write(req_op(rq)); in loop_handle_cmd()
A D sx8.c 700 return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in carm_rq_dir()
723 if (req_op(rq) == REQ_OP_DRV_OUT) in carm_queue_rq()
/linux/drivers/mtd/
A D mtd_blkdevs.c 55 if (req_op(req) == REQ_OP_FLUSH) { in do_blktrans_request()
65 switch (req_op(req)) { in do_blktrans_request()
/linux/drivers/nvme/host/
A D zns.c 246 if (req_op(req) == REQ_OP_ZONE_RESET_ALL) in nvme_setup_zone_mgmt_send()
/linux/drivers/block/paride/
A D pd.c 487 switch (req_op(pd_req)) { in do_pd_io_start()
500 if (req_op(pd_req) == REQ_OP_READ) in do_pd_io_start()
/linux/drivers/s390/block/
A D dasd_fba.c 560 if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES) in dasd_fba_build_cp()
/linux/drivers/mtd/ubi/
A D block.c 320 switch (req_op(req)) { in ubiblock_queue_rq()
/linux/drivers/nvme/target/
A D passthru.c 214 bio->bi_opf = req_op(rq); in nvmet_passthru_map_sg()

Completed in 54 milliseconds

1 2