/linux/drivers/md/

dm-ebs-target.c
    86: if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))   in __ebs_rw_bvec()
    100: if (op == REQ_OP_READ) {   in __ebs_rw_bvec()
    196: if (bio_op(bio) == REQ_OP_READ)   in __ebs_process_bios()
    209: if (bio_op(bio) == REQ_OP_READ)   in __ebs_process_bios()
    210: r = __ebs_rw_bio(ec, REQ_OP_READ, bio);   in __ebs_process_bios()
|
dm-zero.c
    41: case REQ_OP_READ:   in zero_map()
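
The single dm-zero hit is the read arm of its map callback: reads are satisfied by zero-filling the bio, writes are dropped. A condensed sketch of that pattern for a hypothetical device-mapper target, assuming kernel context (example_zero_map is an illustrative name, and the real zero_map() may differ in detail):

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    /* Sketch of a dm map callback that serves reads from zeroes. */
    static int example_zero_map(struct dm_target *ti, struct bio *bio)
    {
            switch (bio_op(bio)) {
            case REQ_OP_READ:
                    if (bio->bi_opf & REQ_RAHEAD)
                            return DM_MAPIO_KILL;   /* readahead of zeroes is pointless */
                    zero_fill_bio(bio);             /* satisfy the read with zeroes */
                    break;
            case REQ_OP_WRITE:
                    break;                          /* silently drop writes */
            default:
                    return DM_MAPIO_KILL;
            }

            bio_endio(bio);                         /* complete without remapping */
            return DM_MAPIO_SUBMITTED;
    }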
|
dm-kcopyd.c
    427: if (job->op == REQ_OP_READ ||   in pop_io_job()
    580: if (job->op == REQ_OP_READ)   in run_io_job()
    827: job->op = REQ_OP_READ;   in dm_kcopyd_copy()
|
dm-integrity.c
    558: if (op == REQ_OP_READ) {   in sync_rw_sb()
    1074: dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?   in rw_journal_sectors()
    1717: io_req.bi_opf = REQ_OP_READ;   in integrity_recheck()
    1895: this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);   in integrity_metadata()
    2051: if (unlikely(dio->op == REQ_OP_READ)) {   in __journal_read_write()
    2152: if (unlikely(dio->op == REQ_OP_READ))   in __journal_read_write()
    3089: io_req.bi_opf = REQ_OP_READ;   in integrity_recalc()
    3435: rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);   in replay_journal()
    3666: rw_journal_sectors(ic, REQ_OP_READ, 0,   in dm_integrity_resume()
    3682: rw_journal_sectors(ic, REQ_OP_READ, 0,   in dm_integrity_resume()
    [all …]
|
raid5-ppl.c
    896: REQ_OP_READ, false)) {   in ppl_recover_entry()
    918: block_size, page2, REQ_OP_READ,   in ppl_recover_entry()
    995: s, page, REQ_OP_READ, false)) {   in ppl_recover()
    1096: pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,   in ppl_load_distributed()
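
These hits are synchronous reads of parity-log blocks during recovery, all going through md's sync_page_io() helper. A minimal sketch of that call pattern, assuming code living under drivers/md (the wrapper name is illustrative):

    #include <linux/blkdev.h>
    #include "md.h"     /* drivers/md internal header declaring sync_page_io() */

    /*
     * Sketch: synchronously read one page from a member device.
     * sync_page_io() blocks until the I/O completes and returns nonzero
     * on success.
     */
    static bool example_read_member_page(struct md_rdev *rdev, sector_t sector,
                                         struct page *page)
    {
            /* false: 'sector' is an offset into the data area, not the metadata area */
            return sync_page_io(rdev, sector, PAGE_SIZE, page, REQ_OP_READ, false);
    }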
|
dm-snap-persistent.c
    335: r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);   in read_header()
    783: r = area_io(ps, REQ_OP_READ);   in persistent_prepare_merge()
|
/linux/fs/hfsplus/

part_tbl.c
    115: buf, (void **)&pm, REQ_OP_READ);   in hfs_parse_new_pmap()
    139: buf, &data, REQ_OP_READ);   in hfs_part_find()
|
wrapper.c
    189: REQ_OP_READ);   in hfsplus_read_wrapper()
    221: (void **)&sbi->s_backup_vhdr, REQ_OP_READ);   in hfsplus_read_wrapper()
|
/linux/fs/nilfs2/

gcinode.c
    91: submit_bh(REQ_OP_READ, bh);   in nilfs_gccache_submit_read_data()
    131: REQ_OP_READ, out_bh, &pbn);   in nilfs_gccache_submit_read_node()
|
mdt.c
    175: err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, &first_bh);   in nilfs_mdt_read_block()
    186: REQ_OP_READ | REQ_RAHEAD, &bh);   in nilfs_mdt_read_block()
|
/linux/drivers/block/rnbd/

rnbd-proto.h
    225: bio_opf = REQ_OP_READ;   in rnbd_to_bio_flags()
    262: case REQ_OP_READ:   in rq_to_rnbd_flags()
|
/linux/fs/btrfs/

bio.c
    181: bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);   in btrfs_end_repair_bio()
    248: repair_bio = bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS,   in repair_one_sector()
    354: if (bio_op(bio) == REQ_OP_READ) {   in btrfs_simple_end_io()
    371: if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio))   in btrfs_raid56_end_io()
    463: ASSERT(bio_op(orig_bio) != REQ_OP_READ);   in btrfs_submit_mirrored_bio()
    489: if (bio_op(bio) != REQ_OP_READ)   in btrfs_submit_bio()
    498: if (bio_op(bio) == REQ_OP_READ)   in btrfs_submit_bio()
    697: if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio)) {   in btrfs_submit_chunk()
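
Most of the btrfs hits gate repair and checksum-verification logic on bio_op(bio) == REQ_OP_READ; the two allocation hits build a fresh read bio for a repair attempt. A generic sketch of allocating a one-page read bio from a private bio_set, assuming kernel context (every name here is illustrative rather than btrfs's own):

    #include <linux/bio.h>

    static struct bio_set example_bioset;   /* assume bioset_init() ran at module init */

    /* Sketch: build a single-vector read bio, e.g. for re-reading a bad sector. */
    static struct bio *example_alloc_repair_read(struct block_device *bdev,
                                                 sector_t sector, struct page *page,
                                                 bio_end_io_t *end_io)
    {
            struct bio *bio;

            bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOFS, &example_bioset);
            bio->bi_iter.bi_sector = sector;
            bio->bi_end_io = end_io;
            __bio_add_page(bio, page, PAGE_SIZE, 0);
            return bio;
    }

The bio_reset() hit at line 181 achieves the same thing for an existing bio: it reinitializes it with a new bdev and opf instead of allocating a fresh one.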
|
/linux/fs/xfs/

xfs_bio_io.c
    56: if (is_vmalloc && op == REQ_OP_READ)   in xfs_rw_bdev()
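
The xfs hit is a cache-coherency detail: when the I/O buffer is vmalloc'ed, the kernel's vmap alias has to be flushed before a write is submitted and invalidated after a read completes. A minimal sketch of that rule, assuming kernel context (the helper names are illustrative):

    #include <linux/blk_types.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>

    /* Sketch: call before submitting a write from a possibly vmalloc'ed buffer. */
    static void example_prepare_vmap_write(void *buf, unsigned int count)
    {
            if (is_vmalloc_addr(buf))
                    flush_kernel_vmap_range(buf, count);
    }

    /* Sketch: call after a read into a possibly vmalloc'ed buffer has completed. */
    static void example_finish_vmap_read(void *buf, unsigned int count, enum req_op op)
    {
            if (op == REQ_OP_READ && is_vmalloc_addr(buf))
                    invalidate_kernel_vmap_range(buf, count);
    }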
|
/linux/fs/ocfs2/

buffer_head_io.c
    150: submit_bh(REQ_OP_READ, bh);   in ocfs2_read_blocks_sync()
    330: submit_bh(REQ_OP_READ, bh);   in ocfs2_read_blocks()
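
Both ocfs2 hits are the classic synchronous buffer_head read. A minimal sketch of that sequence, assuming kernel context:

    #include <linux/buffer_head.h>
    #include <linux/errno.h>

    /* Sketch: read one block through a buffer_head and wait for completion. */
    static int example_bh_read_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (buffer_uptodate(bh)) {              /* someone else read it already */
                    unlock_buffer(bh);
                    return 0;
            }

            get_bh(bh);                             /* reference dropped by end_io */
            bh->b_end_io = end_buffer_read_sync;
            submit_bh(REQ_OP_READ, bh);             /* bare op, no modifier flags */

            wait_on_buffer(bh);
            return buffer_uptodate(bh) ? 0 : -EIO;
    }

Newer kernels also expose helpers such as bh_read() that bundle roughly this sequence.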
|
/linux/include/linux/

blk_types.h
    327: REQ_OP_READ = (__force blk_opf_t)0,   enumerator
    457: return (op & REQ_OP_MASK) == REQ_OP_READ ||   in op_is_sync()
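
This is the definition site: REQ_OP_READ is operation 0 in the blk_opf_t word, whose low bits carry the op and whose high bits carry REQ_* modifier flags, and op_is_sync() counts reads as synchronous. A small sketch of the usual way the op is extracted and tested, assuming kernel context:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* bio_op() masks the modifier flags off bi_opf with REQ_OP_MASK. */
    static bool example_bio_is_read(struct bio *bio)
    {
            return bio_op(bio) == REQ_OP_READ;
    }

    /* For a raw blk_opf_t (as in the dm-integrity hit above), mask explicitly. */
    static bool example_opf_is_read(blk_opf_t opf)
    {
            return (opf & REQ_OP_MASK) == REQ_OP_READ;
    }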
|
/linux/drivers/md/bcache/

debug.c
    56: bio->bi_opf = REQ_OP_READ | REQ_META;   in bch_btree_verify()
    119: REQ_OP_READ);   in bch_data_verify()
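
The bcache hits show the op being combined with modifier flags in bi_opf. A tiny sketch of that composition, assuming kernel context:

    #include <linux/bio.h>

    /* Sketch: mark an already-built bio as a metadata read. */
    static void example_mark_metadata_read(struct bio *bio)
    {
            bio->bi_opf = REQ_OP_READ | REQ_META;   /* op in the low bits, flags above */
    }

bio_op() on such a bio still returns plain REQ_OP_READ.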
|
/linux/fs/

direct-io.c
    264: if (dio_op == REQ_OP_READ &&   in dio_complete()
    437: if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)   in dio_bio_submit()
    503: bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;   in dio_bio_complete()
    1186: dio->opf = REQ_OP_READ;   in __blockdev_direct_IO()
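
The direct-io hits implement the rule that user pages a read DMA'd into must be re-dirtied before they are released. A hedged sketch of the completion side of that pattern, assuming kernel context and a simplified dio layout:

    #include <linux/bio.h>

    /*
     * Sketch: completion of a direct-I/O bio.  should_dirty is true when the
     * bio's pages were pinned from user space for a read.
     */
    static void example_dio_bio_done(struct bio *bio, bool should_dirty)
    {
            if (bio_op(bio) == REQ_OP_READ && should_dirty) {
                    /* Re-dirty the pages; this also frees the bio when done. */
                    bio_check_pages_dirty(bio);
            } else {
                    bio_release_pages(bio, should_dirty);   /* unpin (and maybe dirty) */
                    bio_put(bio);
            }
    }

On the submission side the same REQ_OP_READ && should_dirty test (the hit at line 437) gates a prior bio_set_pages_dirty() call.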
|
/linux/kernel/power/

swap.c
    318: hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);   in mark_swapfiles()
    1034: error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);   in get_swap_reader()
    1058: error = hib_submit_io(REQ_OP_READ, offset, buf, hb);   in swap_read_page()
    1573: error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,   in swsusp_check()
    1633: hib_submit_io(REQ_OP_READ, swsusp_resume_block,   in swsusp_unmark()
|
/linux/drivers/md/dm-vdo/

vio.c
    282: if (bio_op(vio->bio) == REQ_OP_READ) {   in vio_record_metadata_io_error()
    457: case REQ_OP_READ:   in vdo_count_bios()
|
/linux/fs/bcachefs/

fs-io-direct.c
    97: REQ_OP_READ,   in bch2_direct_IO_read()
    135: REQ_OP_READ,   in bch2_direct_IO_read()
    140: bio->bi_opf = REQ_OP_READ|REQ_SYNC;   in bch2_direct_IO_read()
|
/linux/block/

fops.c
    70: bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);   in __blkdev_direct_IO_simple()
    173: blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);   in __blkdev_direct_IO()
    308: blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);   in __blkdev_direct_IO_async()
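
In the block-device direct-I/O paths the opf is picked from the iov_iter direction before the bio is built. A minimal sketch of that selection, assuming kernel context (the write-side flags are simplified; the real dio_bio_write_op() also folds in REQ_FUA for dsync writes):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/uio.h>

    /* Sketch: choose the bio op from the iov_iter direction, then allocate. */
    static struct bio *example_dio_bio_alloc(struct block_device *bdev,
                                             struct iov_iter *iter,
                                             unsigned short nr_vecs)
    {
            blk_opf_t opf = iov_iter_rw(iter) == READ ?
                            REQ_OP_READ : REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

            return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
    }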
|
bio-integrity.c
    402: case REQ_OP_READ:   in bio_integrity_prep()
    498: if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {   in __bio_integrity_endio()
|
blk-wbt.c
    622: if (bio_op(bio) == REQ_OP_READ) {   in bio_to_wbt_flags()
    747: if (op == REQ_OP_READ)   in wbt_data_dir()
|
/linux/fs/erofs/

fileio.c
    69: bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);   in erofs_fileio_rq_alloc()
|
/linux/mm/

page_io.c
    581: bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);   in swap_read_folio_bdev_sync()
    600: bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);   in swap_read_folio_bdev_async()
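
The two swap-in hits show the two standard ways to build a read bio: an on-stack bio via bio_init() for the synchronous path, and bio_alloc() for the asynchronous one. A minimal sketch of the synchronous variant, assuming kernel context:

    #include <linux/bio.h>
    #include <linux/mm.h>

    /* Sketch: read one page synchronously using an on-stack, single-vector bio. */
    static int example_read_page_sync(struct block_device *bdev, sector_t sector,
                                      struct page *page)
    {
            struct bio bio;
            struct bio_vec bv;
            int ret;

            bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);      /* no allocation needed */
            bio.bi_iter.bi_sector = sector;
            __bio_add_page(&bio, page, PAGE_SIZE, 0);

            ret = submit_bio_wait(&bio);                    /* blocks until completion */
            bio_uninit(&bio);
            return ret;
    }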
|