
Searched refs:bi_iter (Results 1 – 25 of 78) sorted by relevance


/drivers/md/
dm-bio-record.h  26 struct bvec_iter bi_iter; member
37 bd->bi_iter = bio->bi_iter; in dm_bio_record()
49 bio->bi_iter = bd->bi_iter; in dm_bio_restore()
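
The dm-bio-record.h hits above are the save/restore idiom: a device-mapper target snapshots bio->bi_iter before it remaps and resubmits the bio, so the original iterator can be put back if the I/O has to be retried. Below is a minimal user-space sketch of that pattern, assuming cut-down stand-in structs; these are not the real <linux/bio.h> definitions.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Reduced to the fields the snippet above touches; illustration only. */
struct bvec_iter {
	sector_t     bi_sector;   /* device address in 512-byte sectors */
	unsigned int bi_size;     /* residual I/O count in bytes */
	unsigned int bi_idx;      /* current index into the segment array */
};

struct bio {
	struct bvec_iter bi_iter;
};

struct dm_bio_details {
	struct bvec_iter bi_iter;
};

/* Save the iterator before remapping, as dm_bio_record() does. */
static void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
	bd->bi_iter = bio->bi_iter;
}

/* Put the original iterator back, as dm_bio_restore() does. */
static void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
	bio->bi_iter = bd->bi_iter;
}

int main(void)
{
	struct bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 4096, .bi_idx = 0 } };
	struct dm_bio_details bd;

	dm_bio_record(&bd, &bio);
	bio.bi_iter.bi_sector += 128;	/* target remaps the bio */
	dm_bio_restore(&bd, &bio);	/* e.g. before a retry */

	printf("restored sector %llu, size %u\n",
	       (unsigned long long)bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
	return 0;
}
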
dm-flakey.c  357 bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio()
432 struct bvec_iter iter = bio->bi_iter; in clone_bio()
434 if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT)) in clone_bio()
437 size = bio->bi_iter.bi_size; in clone_bio()
446 clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector); in clone_bio()
511 pb->saved_iter = bio->bi_iter; in flakey_map()
557 clone->bi_iter); in flakey_map()
560 clone->bi_iter); in flakey_map()
dm-ebs-target.c  50 sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio); in __nr_blocks()
145 sector_t block, blocks, sector = bio->bi_iter.bi_sector; in __ebs_discard_bio()
169 sector_t blocks, sector = bio->bi_iter.bi_sector; in __ebs_forget_bio()
195 block1 = __sector_to_block(ec, bio->bi_iter.bi_sector); in __ebs_process_bios()
200 if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs)) in __ebs_process_bios()
366 bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector); in ebs_map()
375 if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) || in ebs_map()
dm-writecache.c  1254 struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter); in bio_copy_block()
1306 writecache_discard(wc, bio->bi_iter.bi_sector, in writecache_flush_thread()
1361 if (bio->bi_iter.bi_size) in writecache_map_read()
1367 bio->bi_iter.bi_sector = cache_sector(wc, e); in writecache_map_read()
1388 while (bio_size < bio->bi_iter.bi_size) { in writecache_bio_copy_ssd()
1421 bio->bi_iter.bi_sector = start_cache_sec; in writecache_bio_copy_ssd()
1492 } while (bio->bi_iter.bi_size); in writecache_map_write()
1554 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in writecache_map()
1559 (unsigned long long)bio->bi_iter.bi_sector, in writecache_map()
1560 bio->bi_iter.bi_size, wc->block_size); in writecache_map()
[all …]
dm-stripe.c  255 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range()
261 bio->bi_iter.bi_sector = begin + in stripe_map_range()
263 bio->bi_iter.bi_size = to_bytes(end - begin); in stripe_map_range()
292 stripe_map_sector(sc, bio->bi_iter.bi_sector, in stripe_map()
293 &stripe, &bio->bi_iter.bi_sector); in stripe_map()
295 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; in stripe_map()
md-linear.c  236 sector_t bio_sector = bio->bi_iter.bi_sector; in linear_make_request()
275 bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - in linear_make_request()
294 (unsigned long long)bio->bi_iter.bi_sector, in linear_make_request()
dm-log-writes.c  222 bio->bi_iter.bi_size = 0; in write_metadata()
223 bio->bi_iter.bi_sector = sector; in write_metadata()
275 bio->bi_iter.bi_size = 0; in write_inline_data()
276 bio->bi_iter.bi_sector = sector; in write_inline_data()
356 bio->bi_iter.bi_size = 0; in log_one_block()
357 bio->bi_iter.bi_sector = sector; in log_one_block()
374 bio->bi_iter.bi_size = 0; in log_one_block()
375 bio->bi_iter.bi_sector = sector; in log_one_block()
703 block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector); in log_writes_map()
dm-io-rewind.c  140 dm_bio_rewind_iter(bio, &bio->bi_iter, bytes); in dm_bio_rewind()
154 orig->bi_iter.bi_size)); in dm_io_rewind()
raid10.c  1358 bio->bi_iter.bi_sector, in raid10_write_request()
1550 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1656 bio_start = bio->bi_iter.bi_sector; in raid10_handle_discard()
1705 bio_start = bio->bi_iter.bi_sector; in raid10_handle_discard()
1898 (bio->bi_iter.bi_sector & in raid10_make_request()
2371 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2372 fbio->bi_iter.bi_idx = 0; in sync_request_write()
2825 wbio->bi_iter.bi_sector = wsector + in narrow_write_error()
3437 bio->bi_iter.bi_sector = from_addr + in raid10_sync_request()
3459 bio->bi_iter.bi_sector = to_addr in raid10_sync_request()
[all …]
dm-dust.c  230 bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector); in dust_map()
233 r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb); in dust_map()
235 r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb); in dust_map()
dm-integrity.c  1954 bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector); in dm_integrity_map()
2042 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); in __journal_read_write()
2168 if (unlikely(bio->bi_iter.bi_size)) { in __journal_read_write()
2450 dio->range.logical_sector = bio->bi_iter.bi_sector; in dm_integrity_map_inline()
2489 dio->bio_details.bi_iter = bio->bi_iter; in dm_integrity_map_inline()
2495 bio->bi_iter.bi_sector += ic->start + SB_SECTORS; in dm_integrity_map_inline()
2506 while (dio->bio_details.bi_iter.bi_size) { in dm_integrity_map_inline()
2553 while (dio->bio_details.bi_iter.bi_size) { in dm_integrity_inline_recheck()
2580 outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS; in dm_integrity_inline_recheck()
2597 bio, dio->bio_details.bi_iter.bi_sector, 0); in dm_integrity_inline_recheck()
[all …]
dm-unstripe.c  120 sector_t sector = bio->bi_iter.bi_sector; in map_to_core()
140 bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start; in unstripe_map()
raid1.c  319 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io()
1223 int size = bio->bi_iter.bi_size; in alloc_behind_master_bio()
1233 behind_bio->bi_iter.bi_size = size; in alloc_behind_master_bio()
1263 bio->bi_iter.bi_size); in alloc_behind_master_bio()
1298 r1_bio->sector = bio->bi_iter.bi_sector; in init_r1bio()
1403 read_bio->bi_iter.bi_sector = r1_bio->sector + in raid1_read_request()
1480 bio->bi_iter.bi_sector, in raid1_write_request()
1707 bio->bi_iter.bi_sector, bio_sectors(bio)); in raid1_make_request()
2278 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
2540 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
[all …]
raid0.c  451 sector_t start = bio->bi_iter.bi_sector; in raid0_handle_discard()
467 zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, in raid0_handle_discard()
558 sector_t bio_sector = bio->bi_iter.bi_sector; in raid0_map_submit_bio()
584 bio->bi_iter.bi_sector = sector + zone->dev_start + in raid0_map_submit_bio()
606 sector = bio->bi_iter.bi_sector; in raid0_make_request()
dm-linear.c  94 bio->bi_iter.bi_sector = linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map()
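
dm-linear.c 94 just above, together with the dm-flakey, dm-dust and dm-ebs hits, shows the most common bi_iter edit: only bi_iter.bi_sector is rewritten, translating the sector the target sees into a sector on the backing device. A sketch of that arithmetic follows, assuming simplified stand-ins for struct dm_target and the target's private data; these are not the kernel structures, and in the real code the private data is reached through ti->private.

#include <stdio.h>

typedef unsigned long long sector_t;

struct dm_target {
	sector_t begin;		/* first sector of this target in the mapped device */
	sector_t len;		/* target length in sectors */
};

struct linear_c {
	sector_t start;		/* offset into the backing device */
};

/* Sector offset of the bio within the target, cf. dm_target_offset(). */
static sector_t dm_target_offset(const struct dm_target *ti, sector_t bi_sector)
{
	return bi_sector - ti->begin;
}

/* Target-relative offset plus backing start, cf. linear_map_sector(). */
static sector_t linear_map_sector(const struct dm_target *ti,
				  const struct linear_c *lc, sector_t bi_sector)
{
	return lc->start + dm_target_offset(ti, bi_sector);
}

int main(void)
{
	struct dm_target ti = { .begin = 1024, .len = 8192 };
	struct linear_c lc = { .start = 204800 };
	sector_t bi_sector = 1536;	/* sector as seen on the mapped device */

	/* the map hook would store this back into bio->bi_iter.bi_sector */
	printf("remapped to backing sector %llu\n",
	       (unsigned long long)linear_map_sector(&ti, &lc, bi_sector));
	return 0;
}
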
dm-thin.c  675 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
692 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
717 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
721 bio->bi_iter.bi_sector = in remap()
1238 return bio->bi_iter.bi_size == in io_overlaps_block()
1914 if (!bio->bi_iter.bi_size) { in provision_block()
1988 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
2124 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add()
2248 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector) in cmp_cells()
2251 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector) in cmp_cells()
[all …]
dm-zoned-target.c  133 clone->bi_iter.bi_sector = in dmz_submit_bio()
135 clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; in dmz_submit_bio()
139 bio_advance(bio, clone->bi_iter.bi_size); in dmz_submit_bio()
159 swap(bio->bi_iter.bi_size, size); in dmz_handle_read_zero()
161 swap(bio->bi_iter.bi_size, size); in dmz_handle_read_zero()
630 sector_t sector = bio->bi_iter.bi_sector; in dmz_map()
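
The dmz_handle_read_zero() hits above (dm-zoned-target.c 159/161), like flash_dev_cache_miss() in bcache further down, rely on bi_iter.bi_size bounding how far any whole-bio helper walks: swapping a smaller byte count into bi_size makes the helper touch only the head of the bio, and swapping it back restores the real size. A toy model of that trick, assuming an illustrative fill_zeroes() helper and reduced structs rather than the kernel's zero-fill API:

#include <stdio.h>
#include <string.h>

struct bvec_iter {
	unsigned long long bi_sector;
	unsigned int	   bi_size;	/* bytes left in the bio */
};

struct bio {
	struct bvec_iter bi_iter;
	unsigned char	 data[4096];	/* stand-in for the bio_vec pages */
};

static void swap_u32(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

/* Honours bi_size and clears exactly that many payload bytes. */
static void fill_zeroes(struct bio *bio)
{
	memset(bio->data, 0, bio->bi_iter.bi_size);
}

int main(void)
{
	struct bio bio = { .bi_iter = { .bi_sector = 0, .bi_size = 4096 } };
	unsigned int head = 512;	/* only the first 512 bytes need zeroing */

	memset(bio.data, 0xff, sizeof(bio.data));

	swap_u32(&bio.bi_iter.bi_size, &head);	/* bio now looks 512 bytes long */
	fill_zeroes(&bio);
	swap_u32(&bio.bi_iter.bi_size, &head);	/* restore the real 4096 */

	printf("data[0]=%u data[512]=%u bi_size=%u\n",
	       bio.data[0], bio.data[512], bio.bi_iter.bi_size);
	return 0;
}
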
/drivers/md/bcache/
request.c  123 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
124 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate()
128 bio->bi_iter.bi_sector, in bch_data_invalidate()
848 s->iop.bio->bi_iter.bi_sector = in CLOSURE_CALLBACK()
849 s->cache_miss->bi_iter.bi_sector; in CLOSURE_CALLBACK()
935 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; in cached_dev_cache_miss()
1224 if (!bio->bi_iter.bi_size) { in cached_dev_submit_bio()
1270 swap(bio->bi_iter.bi_size, bytes); in flash_dev_cache_miss()
1272 swap(bio->bi_iter.bi_size, bytes); in flash_dev_cache_miss()
1276 if (!bio->bi_iter.bi_size) in flash_dev_cache_miss()
[all …]
debug.c  54 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify()
55 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; in bch_btree_verify()
120 check->bi_iter.bi_sector = bio->bi_iter.bi_sector; in bch_data_verify()
121 check->bi_iter.bi_size = bio->bi_iter.bi_size; in bch_data_verify()
141 (uint64_t) bio->bi_iter.bi_sector); in bch_data_verify()
util.c  233 size_t size = bio->bi_iter.bi_size; in bch_bio_map()
236 BUG_ON(!bio->bi_iter.bi_size); in bch_bio_map()
journal.c  57 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket()
58 bio->bi_iter.bi_size = len << 9; in journal_read_bucket()
619 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, in do_journal_discard()
621 bio->bi_iter.bi_size = bucket_bytes(ca); in do_journal_discard()
796 bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
797 bio->bi_iter.bi_size = sectors << 9;
/drivers/md/dm-vdo/
io-submitter.c  148 vio->bios_merged.head->bi_iter.bi_sector); in get_bio_list()
150 vio->bios_merged.tail->bi_iter.bi_sector); in get_bio_list()
193 sector_t merge_sector = bio->bi_iter.bi_sector; in get_mergeable_locked()
216 return (vio_merge->bios_merged.tail->bi_iter.bi_sector == merge_sector ? in get_mergeable_locked()
220 return (vio_merge->bios_merged.head->bi_iter.bi_sector == merge_sector ? in get_mergeable_locked()
229 bio_sector = vio->bios_merged.head->bi_iter.bi_sector; in map_merged_vio()
234 bio_sector = vio->bios_merged.tail->bi_iter.bi_sector; in map_merged_vio()
241 vdo_int_map_remove(bio_map, prev_vio->bios_merged.tail->bi_iter.bi_sector); in merge_to_prev_tail()
254 vdo_int_map_remove(bio_map, next_vio->bios_merged.head->bi_iter.bi_sector); in merge_to_next_head()
291 bio->bi_iter.bi_sector, in try_bio_map_merge()
vio.c  45 physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK; in pbn_from_vio_bio()
186 bio->bi_iter.bi_sector = pbn * VDO_SECTORS_PER_BLOCK; in vdo_set_bio_properties()
460 if (((bio->bi_opf & REQ_PREFLUSH) != 0) && (bio->bi_iter.bi_size == 0)) { in vdo_count_bios()
/drivers/block/
brd.c  111 struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter); in brd_rw_bvec()
112 sector_t sector = bio->bi_iter.bi_sector; in brd_rw_bvec()
140 bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len); in brd_rw_bvec()
186 brd_do_discard(brd, bio->bi_iter.bi_sector, in brd_submit_bio()
187 bio->bi_iter.bi_size); in brd_submit_bio()
195 } while (bio->bi_iter.bi_size); in brd_submit_bio()
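
The brd.c hits above (lines 111 through 195) are the canonical way bi_iter is consumed: take the current segment with bio_iter_iovec(), process it, then advance the iterator by that many bytes until bi_size reaches zero. The sketch below models that walk over an array of segments, assuming bio_vec carries a plain buffer instead of a page; the advance helper only mimics what the kernel does to bi_sector, bi_size, bi_idx and bi_bvec_done.

#include <stdio.h>
#include <string.h>

struct bio_vec {
	const char  *bv_buf;	/* stand-in for bv_page + bv_offset */
	unsigned int bv_len;	/* bytes in this segment */
};

struct bvec_iter {
	unsigned long long bi_sector;		/* device address in 512-byte sectors */
	unsigned int	   bi_size;		/* bytes still to transfer */
	unsigned int	   bi_idx;		/* current segment index */
	unsigned int	   bi_bvec_done;	/* bytes already consumed in that segment */
};

struct bio {
	struct bio_vec	*bi_io_vec;
	struct bvec_iter bi_iter;
};

/* Current segment as seen through the iterator, cf. bio_iter_iovec(). */
static struct bio_vec bio_iter_iovec(const struct bio *bio, struct bvec_iter iter)
{
	struct bio_vec bv = bio->bi_io_vec[iter.bi_idx];

	bv.bv_buf += iter.bi_bvec_done;
	bv.bv_len -= iter.bi_bvec_done;
	if (bv.bv_len > iter.bi_size)
		bv.bv_len = iter.bi_size;
	return bv;
}

/* Advance the iterator by 'bytes', cf. bio_advance_iter_single(). */
static void bio_advance_iter(const struct bio *bio, struct bvec_iter *iter,
			     unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;
	iter->bi_size -= bytes;
	iter->bi_bvec_done += bytes;
	while (iter->bi_bvec_done &&
	       iter->bi_bvec_done >= bio->bi_io_vec[iter->bi_idx].bv_len) {
		iter->bi_bvec_done -= bio->bi_io_vec[iter->bi_idx].bv_len;
		iter->bi_idx++;
	}
}

int main(void)
{
	static char seg_a[512], seg_b[1024], seg_c[512];
	struct bio_vec vecs[] = {
		{ seg_a, sizeof(seg_a) }, { seg_b, sizeof(seg_b) }, { seg_c, sizeof(seg_c) },
	};
	struct bio bio = {
		.bi_io_vec = vecs,
		.bi_iter = { .bi_sector = 100, .bi_size = 2048, .bi_idx = 0, .bi_bvec_done = 0 },
	};

	memset(seg_a, 'A', sizeof(seg_a));
	memset(seg_b, 'B', sizeof(seg_b));
	memset(seg_c, 'C', sizeof(seg_c));

	while (bio.bi_iter.bi_size) {
		struct bio_vec bv = bio_iter_iovec(&bio, bio.bi_iter);

		printf("sector %llu: %u bytes of '%c'\n",
		       bio.bi_iter.bi_sector, bv.bv_len, bv.bv_buf[0]);
		bio_advance_iter(&bio, &bio.bi_iter, bv.bv_len);
	}
	return 0;
}
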
/drivers/nvdimm/
nd_virtio.c  116 if (bio && bio->bi_iter.bi_sector != -1) { in async_pmem_flush()
124 child->bi_iter.bi_sector = -1; in async_pmem_flush()
