Searched refs:opf (Results 1 – 21 of 21) sorted by relevance

/drivers/md/
dm-io.c
306 static void do_region(const blk_opf_t opf, unsigned int region, in do_region() argument
319 const enum req_op op = opf & REQ_OP_MASK; in do_region()
353 bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO, in do_region()
382 WARN_ON_ONCE(opf & REQ_ATOMIC && remaining); in do_region()
386 static void dispatch_io(blk_opf_t opf, unsigned int num_regions, in dispatch_io() argument
401 if (where[i].count || (opf & REQ_PREFLUSH)) in dispatch_io()
402 do_region(opf, i, where + i, dp, io, ioprio); in dispatch_io()
413 struct dm_io_region *where, blk_opf_t opf, in async_io() argument
429 dispatch_io(opf, num_regions, where, dp, io, ioprio); in async_io()
446 struct dm_io_region *where, blk_opf_t opf, struct dpages *dp, in sync_io() argument
[all …]
dm-snap-persistent.c
232 static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf, in chunk_io() argument
241 .bi_opf = opf, in chunk_io()
288 static int area_io(struct pstore *ps, blk_opf_t opf) in area_io() argument
292 return chunk_io(ps, ps->area, chunk, opf, 0); in area_io()
dm-integrity.c
530 static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) in sync_rw_sb() argument
534 const enum req_op op = opf & REQ_OP_MASK; in sync_rw_sb()
537 io_req.bi_opf = opf; in sync_rw_sb()
1037 static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, in rw_journal_sectors() argument
1055 io_req.bi_opf = opf; in rw_journal_sectors()
1075 dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ? in rw_journal_sectors()
1084 static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, in rw_journal() argument
1093 rw_journal_sectors(ic, opf, sector, n_sectors, comp); in rw_journal()
md.h
916 struct page *page, blk_opf_t opf, bool metadata_op);
dm.c
1531 blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; in __send_empty_flush() local
1535 opf |= REQ_IDLE; in __send_empty_flush()
1542 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf); in __send_empty_flush()
md.c
1068 struct page *page, blk_opf_t opf, bool metadata_op) in sync_page_io() argument
1074 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); in sync_page_io()
1076 bio_init(&bio, rdev->bdev, &bvec, 1, opf); in sync_page_io()
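
The /drivers/md/ hits above all follow the same convention: a blk_opf_t packs exactly one REQ_OP_* value together with modifier flags, and callers recover the bare op by masking with REQ_OP_MASK. A minimal sketch of that pattern (a hypothetical helper, not code from the tree):

#include <linux/bio.h>

/* Hypothetical helper loosely modelled on __send_empty_flush() and do_region(). */
static struct bio *alloc_empty_flush_bio(struct block_device *bdev)
{
	/* One REQ_OP_* value OR'ed with modifier flags. */
	const blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
	/* The op is recovered by masking, as in do_region() and sync_rw_sb(). */
	const enum req_op op = opf & REQ_OP_MASK;

	WARN_ON_ONCE(op != REQ_OP_WRITE);
	/* Zero data pages: the bio only carries the flush semantics. */
	return bio_alloc(bdev, 0, opf, GFP_NOIO);
}
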
/drivers/block/
brd.c
58 blk_opf_t opf) in brd_insert_page() argument
62 gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO; in brd_insert_page()
114 blk_opf_t opf = bio->bi_opf; in brd_rw_bvec() local
122 if (!page && op_is_write(opf)) { in brd_rw_bvec()
123 page = brd_insert_page(brd, sector, opf); in brd_rw_bvec()
129 if (op_is_write(opf)) { in brd_rw_bvec()
145 if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT)) in brd_rw_bvec()
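
brd.c also uses opf to steer allocation behaviour: only writes may instantiate a backing page, and REQ_NOWAIT callers must not block in the allocator. A rough, illustrative equivalent (not the driver code itself):

#include <linux/blk_types.h>
#include <linux/gfp.h>

/* Illustrative helper, not brd_insert_page() itself. */
static struct page *get_backing_page(blk_opf_t opf)
{
	/* Nowait I/O must not sleep in the page allocator. */
	gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;

	/* Reads of unbacked sectors are served as zeroes; only writes allocate. */
	if (!op_is_write(opf))
		return NULL;

	return alloc_page(gfp);
}
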
/drivers/nvme/target/
io-cmd-bdev.c
248 blk_opf_t opf; in nvmet_bdev_execute_rw() local
263 opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; in nvmet_bdev_execute_rw()
265 opf |= REQ_FUA; in nvmet_bdev_execute_rw()
268 opf = REQ_OP_READ; in nvmet_bdev_execute_rw()
273 opf |= REQ_FAILFAST_DEV; in nvmet_bdev_execute_rw()
276 opf |= REQ_NOMERGE; in nvmet_bdev_execute_rw()
283 ARRAY_SIZE(req->inline_bvec), opf); in nvmet_bdev_execute_rw()
285 bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf, in nvmet_bdev_execute_rw()
312 opf, GFP_KERNEL); in nvmet_bdev_execute_rw()
zns.c
536 const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; in nvmet_bdev_execute_zone_append() local
575 ARRAY_SIZE(req->inline_bvec), opf); in nvmet_bdev_execute_zone_append()
577 bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL); in nvmet_bdev_execute_zone_append()
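
In the nvmet backend the opf is built per command: writes get REQ_SYNC | REQ_IDLE and optionally REQ_FUA, reads stay plain REQ_OP_READ, and the result is handed straight to bio_alloc(). A condensed sketch of that flag selection (function and parameter names here are invented for illustration):

#include <linux/bio.h>

static struct bio *start_rw_bio(struct block_device *bdev,
				unsigned short nr_vecs, bool is_write, bool fua)
{
	blk_opf_t opf;

	if (is_write) {
		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (fua)
			opf |= REQ_FUA;
	} else {
		opf = REQ_OP_READ;
	}

	return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
}
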
/drivers/scsi/device_handler/
scsi_dh_hp_sw.c
83 blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | in hp_sw_tur() local
103 res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT, in hp_sw_tur()
132 blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | in hp_sw_start_stop() local
157 res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT, in hp_sw_start_stop()
scsi_dh_emc.c
242 blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | in send_trespass_cmd() local
269 err = scsi_execute_cmd(sdev, cdb, opf, csdev->buffer, len, in send_trespass_cmd()
scsi_dh_alua.c
130 blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | in submit_rtpg() local
145 return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen, in submit_rtpg()
163 blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | in submit_stpg() local
180 return scsi_execute_cmd(sdev, cdb, opf, stpg_data, in submit_stpg()
scsi_dh_rdac.c
513 blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | in send_mode_select() local
573 rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size, in send_mode_select()
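
The device handlers all compose the opf for scsi_execute_cmd() the same way: the op encodes the data direction (REQ_OP_DRV_IN or REQ_OP_DRV_OUT) and the REQ_FAILFAST_* flags keep path-checker commands from being retried by lower layers. A sketch with placeholder timeout and retry values, loosely modelled on hp_sw_tur():

#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>
#include <linux/blk_types.h>

/* Illustrative TEST UNIT READY submission, not the handler's code. */
static int send_tur(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	const blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
			      REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
	const struct scsi_exec_args exec_args = {};

	/* No data transfer: buffer NULL, length 0; 30s timeout, 3 retries. */
	return scsi_execute_cmd(sdev, cmd, opf, NULL, 0, 30 * HZ, 3,
				&exec_args);
}
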
/drivers/target/
target_core_iblock.c
354 blk_opf_t opf) in iblock_get_bio() argument
363 bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf, in iblock_get_bio()
751 blk_opf_t opf; in iblock_execute_rw() local
764 opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; in iblock_execute_rw()
772 opf |= REQ_FUA; in iblock_execute_rw()
774 opf |= REQ_FUA; in iblock_execute_rw()
777 opf = REQ_OP_READ; in iblock_execute_rw()
792 bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf); in iblock_execute_rw()
825 bio = iblock_get_bio(cmd, block_lba, sg_num, opf); in iblock_execute_rw()
/drivers/md/dm-vdo/
data-vio.c
1553 blk_opf_t opf = ((data_vio->user_bio->bi_opf & PASSTHROUGH_FLAGS) | REQ_OP_READ); in read_block() local
1556 result = vio_reset_bio(vio, vio->data, read_endio, opf, in read_block()
1560 bio_reset(vio->bio, vio->bio->bi_bdev, opf); in read_block()
1565 vdo_set_bio_properties(vio->bio, vio, read_endio, opf, in read_block()
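
dm-vdo's read path is the one spot above where the opf is partly inherited: selected flags from the user bio are preserved and combined with REQ_OP_READ before the internal bio is reset. Roughly (the flag mask below is a stand-in, not vdo's actual PASSTHROUGH_FLAGS definition):

#include <linux/bio.h>

/* Stand-in for whichever caller flags the driver chooses to propagate. */
#define EXAMPLE_PASSTHROUGH_FLAGS (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEAD)

static void reset_bio_for_read(struct bio *bio, const struct bio *user_bio)
{
	blk_opf_t opf = (user_bio->bi_opf & EXAMPLE_PASSTHROUGH_FLAGS) | REQ_OP_READ;

	/* Reuse the existing bio against the same bdev with the new opf. */
	bio_reset(bio, bio->bi_bdev, opf);
}
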
/drivers/md/bcache/
super.c
416 static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k, in uuid_io() argument
431 bio->bi_opf = opf | REQ_SYNC | REQ_META; in uuid_io()
440 if ((opf & REQ_OP_MASK) != REQ_OP_WRITE) in uuid_io()
445 pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ? in uuid_io()
590 static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf) in prio_io() argument
603 bio->bi_opf = opf | REQ_SYNC | REQ_META; in prio_io()
/drivers/scsi/
scsi_lib.c
295 blk_opf_t opf, void *buffer, unsigned int bufflen, in scsi_execute_cmd() argument
311 req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags); in scsi_execute_cmd()
1237 struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, in scsi_alloc_request() argument
1242 rq = blk_mq_alloc_request(q, opf, flags); in scsi_alloc_request()
scsi_transport_spi.c
111 blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in spi_execute() local
133 return scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, DV_TIMEOUT, 1, in spi_execute()
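
scsi_lib.c shows the other end of that pipeline: the opf passed to scsi_execute_cmd() is forwarded unchanged through scsi_alloc_request() into blk_mq_alloc_request(), where it becomes the request's cmd_flags. A minimal illustration (hypothetical wrapper, not the SCSI midlayer code):

#include <linux/blk-mq.h>

static struct request *alloc_passthrough_request(struct request_queue *q,
						 blk_opf_t opf)
{
	/* opf becomes rq->cmd_flags; BLK_MQ_REQ_* flags go in the last argument. */
	return blk_mq_alloc_request(q, opf, 0);
}
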
/drivers/block/drbd/
drbd_int.h
322 blk_opf_t opf; member
336 ((peer_req)->opf & REQ_OP_MASK)
drbd_receiver.c
1500 drbd_err(device, "Invalid bio op received: 0x%x\n", peer_req->opf); in drbd_submit_peer_request()
1504 bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO); in drbd_submit_peer_request()
1888 peer_req->opf = REQ_OP_WRITE; in recv_resync_read()
2417 peer_req->opf = wire_flags_to_bio(connection, dp_flags); in receive_Data()
2686 peer_req->opf = REQ_OP_READ; in receive_DataRequest()
4790 peer_req->opf = REQ_OP_DISCARD; in receive_rs_deallocated()
drbd_worker.c
403 peer_req->opf = REQ_OP_READ; in read_for_csum()
