
Searched refs:bv_len (Results 1 – 25 of 91) sorted by relevance


/linux/include/linux/
bvec.h
33 unsigned int bv_len; member
48 bv->bv_len = len; in bvec_set_page()
117 .bv_len = mp_bvec_iter_len((bvec), (iter)), \
136 .bv_len = bvec_iter_len((bvec), (iter)), \
154 while (bytes && bytes >= bv[idx].bv_len) { in bvec_iter_advance()
155 bytes -= bv[idx].bv_len; in bvec_iter_advance()
173 if (done == bv[iter->bi_idx].bv_len) { in bvec_iter_advance_single()
216 bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, in bvec_advance()
217 bvec->bv_len - iter_all->done); in bvec_advance()
218 iter_all->done += bv->bv_len; in bvec_advance()
[all …]
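
The bvec.h hits above show the core pattern: bv_len is the byte length of a single bio_vec segment, and iterators advance through a request by consuming whole segments' bv_len before carrying a remainder in bi_bvec_done. Below is a minimal userspace sketch of that advance loop, loosely modelled on the bvec_iter_advance() lines above; the struct layouts and helper names are simplified stand-ins, not the kernel's actual definitions.

/*
 * Simplified model of advancing a bvec iterator by bv_len.
 * Types and names are illustrative assumptions, not <linux/bvec.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct bio_vec_model {
	size_t bv_len;		/* bytes covered by this segment */
};

struct bvec_iter_model {
	unsigned int bi_idx;	/* current segment index */
	size_t bi_bvec_done;	/* bytes already consumed from that segment */
	size_t bi_size;		/* bytes left in the whole request */
};

/* Advance by 'bytes': skip whole segments, keep the remainder. */
static void bvec_iter_advance_model(const struct bio_vec_model *bv,
				    struct bvec_iter_model *iter, size_t bytes)
{
	iter->bi_size -= bytes;
	bytes += iter->bi_bvec_done;

	while (bytes && bytes >= bv[iter->bi_idx].bv_len) {
		bytes -= bv[iter->bi_idx].bv_len;
		iter->bi_idx++;
	}
	iter->bi_bvec_done = bytes;
}

int main(void)
{
	struct bio_vec_model vecs[] = { { .bv_len = 512 }, { .bv_len = 1024 } };
	struct bvec_iter_model iter = { .bi_size = 1536 };

	bvec_iter_advance_model(vecs, &iter, 700);
	/* prints: idx=1 done=188 left=836 */
	printf("idx=%u done=%zu left=%zu\n",
	       iter.bi_idx, iter.bi_bvec_done, iter.bi_size);
	return 0;
}
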
bio.h
149 bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
158 bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
172 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
297 fi->_seg_count = bvec->bv_len; in bio_first_folio()
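
In the bio.h macros above, bv_len both feeds bio_advance_iter_single() and defines the end-of-bio test: bio_iter_last() holds when the iterator's remaining bi_size equals the current bvec's bv_len. A short illustrative sketch of that last-segment check, using simplified stand-in types rather than the kernel's bio.h definitions:

#include <stdbool.h>
#include <stdio.h>

struct seg { unsigned int bv_len; };

int main(void)
{
	struct seg segs[] = { { 4096 }, { 2048 }, { 512 } };
	unsigned int bi_size = 4096 + 2048 + 512;	/* total bytes left */

	for (unsigned int i = 0; i < 3; i++) {
		/* "last" exactly when what's left fits in this segment */
		bool last = (bi_size == segs[i].bv_len);
		printf("segment %u: bv_len=%u last=%d\n",
		       i, segs[i].bv_len, last);
		bi_size -= segs[i].bv_len;	/* advance past this segment */
	}
	return 0;
}
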
/linux/drivers/block/
n64cart.c
69 (bv->bv_len & (MIN_ALIGNMENT - 1))); in n64cart_do_bvec()
79 n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); in n64cart_do_bvec()
83 dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE); in n64cart_do_bvec()
99 pos += bvec.bv_len; in n64cart_submit_bio()
/linux/block/
blk-integrity.c
40 if (seg_size + iv.bv_len > queue_max_segment_size(q)) in blk_rq_count_integrity_sg()
43 seg_size += iv.bv_len; in blk_rq_count_integrity_sg()
47 seg_size = iv.bv_len; in blk_rq_count_integrity_sg()
81 if (sg->length + iv.bv_len > queue_max_segment_size(q)) in blk_rq_map_integrity_sg()
84 sg->length += iv.bv_len; in blk_rq_map_integrity_sg()
94 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); in blk_rq_map_integrity_sg()
blk-merge.c
32 if (bv->bv_len == bio->bi_iter.bi_size) in bio_get_last_bvec()
49 bv->bv_len = iter.bi_bvec_done; in bio_get_last_bvec()
278 unsigned len = min(bv->bv_len, max_len); in bvec_split_segs()
296 return len > 0 || bv->bv_len > max_len; in bvec_split_segs()
327 bytes + bv.bv_len <= max_bytes && in bio_split_rw_at()
328 bv.bv_offset + bv.bv_len <= PAGE_SIZE) { in bio_split_rw_at()
330 bytes += bv.bv_len; in bio_split_rw_at()
475 unsigned nbytes = bvec->bv_len; in blk_bvec_map_sg()
510 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset); in __blk_bvec_map_sg()
520 int nbytes = bvec->bv_len; in __blk_segment_map_sg_merge()
[all …]
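
The blk-merge.c hits show the splitting side: bvec_split_segs() caps each hardware segment at min(bv->bv_len, max_len), so one large bvec may account for several segments. A rough userspace illustration of that carving loop follows; the 4096-byte limit is an assumed example value, not a limit taken from the kernel.

#include <stdio.h>

int main(void)
{
	unsigned int bv_len = 9000;		/* one large bvec */
	const unsigned int max_seg = 4096;	/* assumed per-segment cap */
	unsigned int nsegs = 0;

	while (bv_len) {
		unsigned int len = bv_len < max_seg ? bv_len : max_seg;
		bv_len -= len;
		nsegs++;
	}
	printf("split into %u segments\n", nsegs);	/* prints 3 */
	return 0;
}
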
t10-pi.c
148 for (j = 0; j < iv.bv_len; j += tuple_sz) { in t10_pi_type1_prepare()
196 for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) { in t10_pi_type1_complete()
322 for (j = 0; j < iv.bv_len; j += tuple_sz) { in ext_pi_type1_prepare()
359 for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) { in ext_pi_type1_complete()
391 iter.data_size = bv.bv_len; in blk_integrity_generate()
428 iter.data_size = bv.bv_len; in blk_integrity_verify()
bounce.c
98 bio_advance_iter(from, &from_iter, tovec.bv_len); in copy_to_high_bio_irq()
118 bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len); in bounce_end_io()
215 bytes += from.bv_len; in __blk_queue_bounce()
blk-map.c
58 bvec->bv_len, in bio_copy_from_iter()
64 if (ret < bvec->bv_len) in bio_copy_from_iter()
89 bvec->bv_len, in bio_copy_to_iter()
95 if (ret < bvec->bv_len) in bio_copy_to_iter()
366 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
457 p += bvec->bv_len; in bio_copy_kern_endio_read()
blk.h
112 if (addr1 + vec1->bv_len != addr2) in biovec_phys_mergeable()
116 if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask)) in biovec_phys_mergeable()
125 ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask); in __bvec_gap_to_prev()
355 bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; in bio_may_need_split()
bio.c
654 if (done + bv.bv_len > new_size) { in bio_truncate()
662 bv.bv_len - offset); in bio_truncate()
665 done += bv.bv_len; in bio_truncate()
923 size_t bv_end = bv->bv_offset + bv->bv_len; in bvec_try_merge_page()
943 bv->bv_len += len; in bvec_try_merge_page()
962 if (len > queue_max_segment_size(q) - bv->bv_len) in bvec_try_merge_hw_page()
1497 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); in bio_copy_data_iter()
blk-crypto-fallback.c
220 num_sectors += bv.bv_len >> SECTOR_SHIFT; in blk_crypto_fallback_split_bio_if_needed()
336 for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) { in blk_crypto_fallback_encrypt_bio()
424 for (i = 0; i < bv.bv_len; i += data_unit_size) { in blk_crypto_fallback_decrypt_bio()
/linux/drivers/md/
dm-io-rewind.c
27 while (idx >= 0 && bytes && bytes > bv[idx].bv_len) { in dm_bvec_iter_rewind()
28 bytes -= bv[idx].bv_len; in dm_bvec_iter_rewind()
41 iter->bi_bvec_done = bv[idx].bv_len - bytes; in dm_bvec_iter_rewind()
dm-ebs-target.c
71 unsigned int bv_len = bv->bv_len; in __ebs_rw_bvec() local
76 if (unlikely(!bv->bv_page || !bv_len)) in __ebs_rw_bvec()
82 while (bv_len) { in __ebs_rw_bvec()
83 cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len); in __ebs_rw_bvec()
86 if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio)) in __ebs_rw_bvec()
113 bv_len -= cur_len; in __ebs_rw_bvec()
dm-log-writes.c
367 block->vecs[i].bv_len, 0); in log_one_block()
368 if (ret != block->vecs[i].bv_len) { in log_one_block()
380 block->vecs[i].bv_len, 0); in log_one_block()
381 if (ret != block->vecs[i].bv_len) { in log_one_block()
387 sector += block->vecs[i].bv_len >> SECTOR_SHIFT; in log_one_block()
750 block->vecs[i].bv_len = bv.bv_len; in log_writes_map()
/linux/drivers/md/bcache/
util.c
244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
251 base += bv->bv_len; in bch_bio_map()
254 size -= bv->bv_len; in bch_bio_map()
debug.c
137 cache_set_err_on(memcmp(p1, p2, bv.bv_len), in bch_data_verify()
145 bio_advance_iter(check, &citer, bv.bv_len); in bch_data_verify()
/linux/drivers/s390/block/
dasd_fba.c
458 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp_regular()
461 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp_regular()
462 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp_regular()
463 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp_regular()
505 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp_regular()
509 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_build_cp_regular()
583 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_free_cp()
594 memcpy(dst, cda, bv.bv_len); in dasd_fba_free_cp()
/linux/net/ceph/
messenger_v2.c
282 con->v2.out_bvec.bv_len); in set_out_bvec()
294 con->v2.out_bvec.bv_len); in set_out_bvec_zero()
1893 con->v2.in_bvec.bv_len); in prepare_read_data_cont()
1898 con->v2.in_bvec.bv_len); in prepare_read_data_cont()
1903 con->v2.in_bvec.bv_len); in prepare_read_data_cont()
1939 con->v2.in_bvec.bv_len); in prepare_sparse_read_cont()
1943 con->v2.in_bvec.bv_len); in prepare_sparse_read_cont()
1948 con->v2.in_bvec.bv_len); in prepare_sparse_read_cont()
1959 bv.bv_len = cursor->sr_resid; in prepare_sparse_read_cont()
2006 if (bv.bv_len > cursor->sr_resid) in prepare_sparse_read_cont()
[all …]
/linux/fs/squashfs/
block.c
50 int bytes_to_copy = min_t(int, bvec->bv_len - offset, in copy_bio_to_actor()
69 if (offset >= bvec->bv_len) { in copy_bio_to_actor()
303 if (offset < bvec->bv_len - 1) { in squashfs_read_data()
lzo_wrapper.c
77 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lzo_uncompress()
zlib_wrapper.c
83 avail = min(length, ((int)bvec->bv_len) - offset); in zlib_uncompress()
/linux/drivers/xen/
biomerge.c
15 return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; in xen_biovec_phys_mergeable()
/linux/fs/netfs/
iterator.c
123 len = bvecs[ix].bv_len; in netfs_limit_bvec()
132 len = min3(n, bvecs[ix].bv_len - skip, max_size); in netfs_limit_bvec()
/linux/fs/bcachefs/
checksum.c
281 bch2_checksum_update(&state, p, bv.bv_len); in __bch2_checksum_bio()
287 bv.bv_len); in __bch2_checksum_bio()
304 crypto_shash_update(desc, p, bv.bv_len); in __bch2_checksum_bio()
311 bv.bv_len); in __bch2_checksum_bio()
349 .length = bv.bv_len, in __bch2_encrypt_bio()
/linux/lib/
iov_iter.c
504 if (likely(size < bvec->bv_len)) in iov_iter_bvec_advance()
506 size -= bvec->bv_len; in iov_iter_bvec_advance()
633 size_t n = (--bvec)->bv_len; in iov_iter_revert()
670 return min(i->count, i->bvec->bv_len - i->iov_offset); in iov_iter_single_seg_count()
823 size_t len = bvec->bv_len; in iov_iter_aligned_bvec()
915 size_t len = bvec->bv_len - skip; in iov_iter_alignment_bvec()
1140 len = i->bvec->bv_len - skip; in first_bvec_segment()
1198 if (i->iov_offset == i->bvec->bv_len) { in __iov_iter_get_pages_alloc()
1267 size_t len = min(p->bv_len - skip, size); in bvec_npages()
1701 size = min(maxsize, i->bvec->bv_len - skip); in iov_iter_extract_bvec_pages()

