lib/iov_iter.c — lines matching references to the identifier `i` (the struct iov_iter being operated on, except where noted). Lines are grouped by function; the left-hand numbers are line numbers in the source file.

In fault_in_iov_iter_readable():
   90  size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
   92          if (iter_is_ubuf(i)) {
   93                  size_t n = min(size, iov_iter_count(i));
   94                  n -= fault_in_readable(i->ubuf + i->iov_offset, n);
   96          } else if (iter_is_iovec(i)) {
   97                  size_t count = min(size, iov_iter_count(i));
  102                  for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {

In fault_in_iov_iter_writeable():
  133  size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
  135          if (iter_is_ubuf(i)) {
  136                  size_t n = min(size, iov_iter_count(i));
  137                  n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
  139          } else if (iter_is_iovec(i)) {
  140                  size_t count = min(size, iov_iter_count(i));
  145                  for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
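Both helpers return the number of bytes that could not be faulted in, so a return value equal to the requested size means no progress is possible. They exist to break the page-fault deadlock in paths that must copy with faults disabled. A minimal sketch of the usual retry pattern, loosely modelled on generic_perform_write(); the surrounding locking and the folio/offset/bytes variables are assumed caller context, not part of this listing:

        size_t copied;

        if (unlikely(fault_in_iov_iter_readable(iter, bytes) == bytes))
                return -EFAULT;         /* nothing could be faulted in */

        /* ... lock the target folio, then copy with faults disabled ... */
        copied = copy_folio_from_iter_atomic(folio, offset, bytes, iter);

        /* On a short copy, unlock and loop: the fault-in above runs again
         * on the not-yet-copied tail before the next atomic attempt. */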
In iov_iter_init():
  162  void iov_iter_init(struct iov_iter *i, unsigned int direction,
  167          *i = (struct iov_iter) {
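iov_iter_init() builds an iterator over an array of user-space iovecs; most callers reach it indirectly through import_iovec() rather than calling it themselves. A minimal sketch, where user_buf and user_len are assumed caller state:

        struct iovec iov = {
                .iov_base = user_buf,   /* a void __user * */
                .iov_len  = user_len,
        };
        struct iov_iter iter;

        iov_iter_init(&iter, ITER_DEST, &iov, 1, user_len);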
In _copy_to_iter():
  179  size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  181          if (WARN_ON_ONCE(i->data_source))
  183          if (user_backed_iter(i))
  185          return iterate_and_advance(i, bytes, (void *)addr,

In _copy_mc_to_iter():
  234  size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  236          if (WARN_ON_ONCE(i->data_source))
  238          if (user_backed_iter(i))
  240          return iterate_and_advance(i, bytes, (void *)addr,

In __copy_from_iter():
  247  size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  249          return iterate_and_advance(i, bytes, addr,

In _copy_from_iter():
  253  size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  255          if (WARN_ON_ONCE(!i->data_source))
  258          if (user_backed_iter(i))
  260          return __copy_from_iter(addr, bytes, i);

In _copy_from_iter_nocache():
  271  size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  273          if (WARN_ON_ONCE(!i->data_source))
  276          return iterate_and_advance(i, bytes, addr,

In _copy_from_iter_flushcache():
  314  size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
  316          if (WARN_ON_ONCE(!i->data_source))
  319          return iterate_and_advance(i, bytes, addr,
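These leading-underscore primitives are normally reached through the copy_to_iter()/copy_from_iter() wrappers in <linux/uio.h>, which add the hardened-usercopy checks. Both return the number of bytes actually copied and advance the iterator past them, so a short return signals a fault partway through. A sketch of typical use in a read-style path, where kbuf and klen are assumed driver state:

        size_t want = min(klen, iov_iter_count(to));
        size_t done = copy_to_iter(kbuf, want, to);

        if (done < want)
                return done ? done : -EFAULT;   /* partial copy: fault hit */
        return done;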
In copy_page_to_iter():
  350                           struct iov_iter *i)
  355          if (WARN_ON_ONCE(i->data_source))
  362                  n = _copy_to_iter(kaddr + offset, n, i);

In copy_page_to_iter_nofault():
  379                           struct iov_iter *i)
  385          if (WARN_ON_ONCE(i->data_source))
  393                  n = iterate_and_advance(i, n, kaddr + offset,

In copy_page_from_iter():
  412                           struct iov_iter *i)
  422                  n = _copy_from_iter(kaddr + offset, n, i);

In iov_iter_zero():
  453  size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
  455          return iterate_and_advance(i, bytes, NULL,
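iov_iter_zero() feeds zeroes into the destination described by the iterator, again returning the number of bytes actually written. A short sketch for zero-filling a hole during a sparse read, where hole_len is assumed context:

        size_t cleared = iov_iter_zero(hole_len, to);

        if (cleared != hole_len)
                return -EFAULT; /* destination page went away mid-copy */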
In copy_folio_from_iter_atomic():
  461                                  size_t bytes, struct iov_iter *i)
  467          if (WARN_ON_ONCE(!i->data_source))
  479                  n = __copy_from_iter(to, n, i);

In iov_iter_bvec_advance():
  490  static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
  494          if (!i->count)
  496          i->count -= size;
  498          size += i->iov_offset;
  500          for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
  505          i->iov_offset = size;
  506          i->nr_segs -= bvec - i->bvec;
  507          i->bvec = bvec;

In iov_iter_iovec_advance():
  510  static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
  514          if (!i->count)
  516          i->count -= size;
  518          size += i->iov_offset; // from beginning of current segment
  519          for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
  524          i->iov_offset = size;
  525          i->nr_segs -= iov - iter_iov(i);
  526          i->__iov = iov;

In iov_iter_folioq_advance():
  529  static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
  531          const struct folio_queue *folioq = i->folioq;
  532          unsigned int slot = i->folioq_slot;
  534          if (!i->count)
  536          i->count -= size;
  543          size += i->iov_offset; /* From beginning of current segment. */
  557          i->iov_offset = size;
  558          i->folioq_slot = slot;
  559          i->folioq = folioq;

In iov_iter_advance():
  562  void iov_iter_advance(struct iov_iter *i, size_t size)
  564          if (unlikely(i->count < size))
  565                  size = i->count;
  566          if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
  567                  i->iov_offset += size;
  568                  i->count -= size;
  569          } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
  571                  iov_iter_iovec_advance(i, size);
  572          } else if (iov_iter_is_bvec(i)) {
  573                  iov_iter_bvec_advance(i, size);
  574          } else if (iov_iter_is_folioq(i)) {
  575                  iov_iter_folioq_advance(i, size);
  576          } else if (iov_iter_is_discard(i)) {
  577                  i->count -= size;
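Note the clamp at the top of iov_iter_advance(): advancing never moves past the end of the iterator, so callers can skip ahead without pre-checking the remaining count. A minimal sketch, where the 12-byte header is illustrative:

        iov_iter_advance(iter, 12);     /* skip an already-parsed header */
        if (!iov_iter_count(iter))
                return 0;               /* header-only request */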
In iov_iter_folioq_revert():
  582  static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
  584          const struct folio_queue *folioq = i->folioq;
  585          unsigned int slot = i->folioq_slot;
  598                          i->iov_offset = fsize - unroll;
  604          i->folioq_slot = slot;
  605          i->folioq = folioq;

In iov_iter_revert():
  608  void iov_iter_revert(struct iov_iter *i, size_t unroll)
  614          i->count += unroll;
  615          if (unlikely(iov_iter_is_discard(i)))
  617          if (unroll <= i->iov_offset) {
  618                  i->iov_offset -= unroll;
  621          unroll -= i->iov_offset;
  622          if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
  627          } else if (iov_iter_is_bvec(i)) {
  628                  const struct bio_vec *bvec = i->bvec;
  631                          i->nr_segs++;
  633                                  i->bvec = bvec;
  634                                  i->iov_offset = n - unroll;
  639          } else if (iov_iter_is_folioq(i)) {
  640                  i->iov_offset = 0;
  641                  iov_iter_folioq_revert(i, unroll);
  643                  const struct iovec *iov = iter_iov(i);
  646                          i->nr_segs++;
  648                                  i->__iov = iov;
  649                                  i->iov_offset = n - unroll;
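iov_iter_revert() walks segments backwards to undo a prior advance, which is what makes "advance optimistically, roll back on short I/O" retry loops possible. A sketch, where submit_chunk() is a hypothetical helper that consumes the iterator as it goes:

        size_t done = submit_chunk(dev, iter, len);

        if (done < len)
                iov_iter_revert(iter, len - done);      /* put back the unsent tail */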
In iov_iter_single_seg_count():
  661  size_t iov_iter_single_seg_count(const struct iov_iter *i)
  663          if (i->nr_segs > 1) {
  664                  if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  665                          return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
  666                  if (iov_iter_is_bvec(i))
  667                          return min(i->count, i->bvec->bv_len - i->iov_offset);
  669          if (unlikely(iov_iter_is_folioq(i)))
  670                  return !i->count ? 0 :
  671                          umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
  672          return i->count;

In iov_iter_kvec():
  676  void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
  681          *i = (struct iov_iter){

In iov_iter_bvec():
  692  void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
  697          *i = (struct iov_iter){

In iov_iter_folio_queue():
  722  void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
  727          *i = (struct iov_iter) {

In iov_iter_xarray():
  751  void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
  755          *i = (struct iov_iter) {

In iov_iter_discard():
  775  void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
  778          *i = (struct iov_iter){
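The constructors above stamp out each iterator flavour over caller-owned segment arrays, which must stay alive for the iterator's lifetime. A sketch building a kernel-memory source and a page-based destination, where kbuf, klen and page are assumed caller state:

        #include <linux/uio.h>
        #include <linux/bvec.h>

        struct kvec kv = { .iov_base = kbuf, .iov_len = klen };
        struct bio_vec bv;
        struct iov_iter kiter, biter;

        iov_iter_kvec(&kiter, ITER_SOURCE, &kv, 1, klen);       /* kernel memory */

        bvec_set_page(&bv, page, PAGE_SIZE, 0);
        iov_iter_bvec(&biter, ITER_DEST, &bv, 1, PAGE_SIZE);    /* one page */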
In iov_iter_aligned_iovec():
  787  static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
  790          const struct iovec *iov = iter_iov(i);
  791          size_t size = i->count;
  792          size_t skip = i->iov_offset;

In iov_iter_aligned_bvec():
  812  static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
  815          const struct bio_vec *bvec = i->bvec;
  816          unsigned skip = i->iov_offset;
  817          size_t size = i->count;

In iov_iter_is_aligned():
  847  bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
  850          if (likely(iter_is_ubuf(i))) {
  851                  if (i->count & len_mask)
  853                  if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
  858          if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  859                  return iov_iter_aligned_iovec(i, addr_mask, len_mask);
  861          if (iov_iter_is_bvec(i))
  862                  return iov_iter_aligned_bvec(i, addr_mask, len_mask);
  865          if (iov_iter_is_xarray(i)) {
  866                  if (i->count & len_mask)
  868                  if ((i->xarray_start + i->iov_offset) & addr_mask)
  871          if (iov_iter_is_folioq(i)) {
  872                  if (i->count & len_mask)
  874                  if (i->iov_offset & addr_mask)

In iov_iter_alignment_iovec():
  882  static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
  884          const struct iovec *iov = iter_iov(i);
  886          size_t size = i->count;
  887          size_t skip = i->iov_offset;

In iov_iter_alignment_bvec():
  904  static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
  906          const struct bio_vec *bvec = i->bvec;
  908          size_t size = i->count;
  909          unsigned skip = i->iov_offset;

In iov_iter_alignment():
  925  unsigned long iov_iter_alignment(const struct iov_iter *i)
  927          if (likely(iter_is_ubuf(i))) {
  928                  size_t size = i->count;
  930                  return ((unsigned long)i->ubuf + i->iov_offset) | size;
  935          if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  936                  return iov_iter_alignment_iovec(i);
  938          if (iov_iter_is_bvec(i))
  939                  return iov_iter_alignment_bvec(i);
  942          if (iov_iter_is_folioq(i))
  943                  return i->iov_offset | i->count;
  944          if (iov_iter_is_xarray(i))
  945                  return (i->xarray_start + i->iov_offset) | i->count;
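iov_iter_alignment() ORs together every segment address and length so the caller can test them all against one mask; iov_iter_is_aligned() performs the same test with separate address and length masks. A direct-I/O-style gate, assuming an illustrative 512-byte logical block size:

        const unsigned int blksz = 512;

        if (!iov_iter_is_aligned(iter, blksz - 1, blksz - 1))
                return -EINVAL;         /* misaligned buffer or length */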
In iov_iter_gap_alignment():
  951  unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
  955          size_t size = i->count;
  958          if (iter_is_ubuf(i))
  961          if (WARN_ON(!iter_is_iovec(i)))
  964          for (k = 0; k < i->nr_segs; k++) {
  965                  const struct iovec *iov = iter_iov(i) + k;

In iter_xarray_get_pages():
 1082  static ssize_t iter_xarray_get_pages(struct iov_iter *i,
 1090          pos = i->xarray_start + i->iov_offset;
 1098          nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
 1103          i->iov_offset += maxsize;
 1104          i->count -= maxsize;

In first_iovec_segment():
 1109  static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
 1114          if (iter_is_ubuf(i))
 1115                  return (unsigned long)i->ubuf + i->iov_offset;
 1117          for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
 1118                  const struct iovec *iov = iter_iov(i) + k;

In first_bvec_segment():
 1131  static struct page *first_bvec_segment(const struct iov_iter *i,
 1135          size_t skip = i->iov_offset, len;
 1137          len = i->bvec->bv_len - skip;
 1140          skip += i->bvec->bv_offset;
 1141          page = i->bvec->bv_page + skip / PAGE_SIZE;

In __iov_iter_get_pages_alloc():
 1146  static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
 1152          if (maxsize > i->count)
 1153                  maxsize = i->count;
 1159          if (likely(user_backed_iter(i))) {
 1163                  if (iov_iter_rw(i) != WRITE)
 1165                  if (i->nofault)
 1168                  addr = first_iovec_segment(i, &maxsize);
 1178                  iov_iter_advance(i, maxsize);
 1181          if (iov_iter_is_bvec(i)) {
 1185                  page = first_bvec_segment(i, &maxsize, start);
 1197                  i->count -= maxsize;
 1198                  i->iov_offset += maxsize;
 1199                  if (i->iov_offset == i->bvec->bv_len) {
 1200                          i->iov_offset = 0;
 1201                          i->bvec++;
 1202                          i->nr_segs--;
 1206          if (iov_iter_is_folioq(i))
 1207                  return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
 1208          if (iov_iter_is_xarray(i))
 1209                  return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);

In iov_iter_get_pages2():
 1213  ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
 1220          return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);

In iov_iter_get_pages_alloc2():
 1224  ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
 1231          len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
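iov_iter_get_pages2() takes page references for the leading part of the iterator, returns the number of bytes covered, fills *start with the offset into the first page, and advances the iterator itself; the references are dropped with put_page(). A sketch with a fixed on-stack array:

        struct page *pages[16];
        size_t start;
        ssize_t got;
        unsigned int n;

        got = iov_iter_get_pages2(iter, pages, iov_iter_count(iter),
                                  ARRAY_SIZE(pages), &start);
        if (got <= 0)
                return got;
        /* ... do the I/O against pages[], offset 'start' in pages[0] ... */
        for (n = 0; n < DIV_ROUND_UP(start + got, PAGE_SIZE); n++)
                put_page(pages[n]);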
In iov_npages():
 1240  static int iov_npages(const struct iov_iter *i, int maxpages)
 1242          size_t skip = i->iov_offset, size = i->count;
 1246          for (p = iter_iov(i); size; skip = 0, p++) {

In bvec_npages():
 1260  static int bvec_npages(const struct iov_iter *i, int maxpages)
 1262          size_t skip = i->iov_offset, size = i->count;
 1266          for (p = i->bvec; size; skip = 0, p++) {

In iov_iter_npages():
 1278  int iov_iter_npages(const struct iov_iter *i, int maxpages)
 1280          if (unlikely(!i->count))
 1282          if (likely(iter_is_ubuf(i))) {
 1283                  unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
 1284                  int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
 1288          if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
 1289                  return iov_npages(i, maxpages);
 1290          if (iov_iter_is_bvec(i))
 1291                  return bvec_npages(i, maxpages);
 1292          if (iov_iter_is_folioq(i)) {
 1293                  unsigned offset = i->iov_offset % PAGE_SIZE;
 1294                  int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
 1297          if (iov_iter_is_xarray(i)) {
 1298                  unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
 1299                  int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
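iov_iter_npages() reports how many pages the iterator's contents would touch, clamped to maxpages, which is how block-layer callers size their vec arrays up front. For instance, a sketch of sizing a bio before filling it (bdev and the read opcode are illustrative):

        int npages = iov_iter_npages(iter, BIO_MAX_VECS);
        struct bio *bio = bio_alloc(bdev, npages, REQ_OP_READ, GFP_KERNEL);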
In copy_compat_iovec_from_user() (here `i` is a local loop counter, not an iov_iter):
 1328          u32 i;
 1333          for (i = 0; i < nr_segs; i++) {
 1337                  unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
 1338                  unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
 1345                  iov[i].iov_base = compat_ptr(buf);
 1346                  iov[i].iov_len = len;

In __import_iovec_ubuf():
 1426                                  struct iovec **iovp, struct iov_iter *i,
 1441          ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
 1444          return i->count;

In __import_iovec():
 1449                   struct iov_iter *i, bool compat)
 1456                  return __import_iovec_ubuf(type, uvec, iovp, i, compat);
 1489          iov_iter_init(i, type, iov, nr_segs, total_len);

In import_iovec():
 1521                   struct iovec **iovp, struct iov_iter *i)
 1523          return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,

In import_ubuf():
 1528  int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
 1535          iov_iter_ubuf(i, rw, buf, len);
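import_iovec() and import_ubuf() are the syscall-side entry points: they validate the user descriptors and initialise the iterator in one step. On success import_iovec() returns the total byte count and points *iovp at the copied array, setting it to NULL when the caller's fast on-stack array sufficed, so an unconditional kfree() is safe. A readv()-style sketch, where uvec and nr come from the syscall ABI:

        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(ITER_DEST, uvec, nr, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;
        /* ... perform the I/O through 'iter' ... */
        kfree(iov);     /* NULL (and thus a no-op) if iovstack was used */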
In iov_iter_restore():
 1552  void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
 1554          if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
 1555                           !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
 1557          i->iov_offset = state->iov_offset;
 1558          i->count = state->count;
 1559          if (iter_is_ubuf(i))
 1571          if (iov_iter_is_bvec(i))
 1572                  i->bvec -= state->nr_segs - i->nr_segs;
 1574                  i->__iov -= state->nr_segs - i->nr_segs;
 1575          i->nr_segs = state->nr_segs;
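iov_iter_restore() pairs with iov_iter_save_state(); because the sum of nr_segs and the segment pointer stays constant as an iterator advances, the saved state only needs the offset, count and segment count to rewind. A retry-path sketch, where do_attempt() and queue_retry() are hypothetical:

        struct iov_iter_state state;
        ssize_t ret;

        iov_iter_save_state(iter, &state);
        ret = do_attempt(iter);                 /* may partially consume iter */
        if (ret == -EAGAIN) {
                iov_iter_restore(iter, &state); /* rewind before requeueing */
                return queue_retry(req);
        }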
In iov_iter_extract_folioq_pages():
 1582  static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
 1588          const struct folio_queue *folioq = i->folioq;
 1591          size_t extracted = 0, offset, slot = i->folioq_slot;
 1596                  if (WARN_ON(i->iov_offset != 0))
 1600          offset = i->iov_offset & ~PAGE_MASK;
 1610                  size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
 1615                          i->count -= part;
 1616                          i->iov_offset += part;
 1625                  if (i->iov_offset >= fsize) {
 1626                          i->iov_offset = 0;
 1635          i->folioq = folioq;
 1636          i->folioq_slot = slot;

In iov_iter_extract_xarray_pages():
 1644  static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
 1653          loff_t pos = i->xarray_start + i->iov_offset;
 1654          XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);
 1682          iov_iter_advance(i, maxsize);

In iov_iter_extract_bvec_pages():
 1690  static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
 1696          size_t skip = i->iov_offset, size = 0;
 1700          if (i->nr_segs == 0)
 1703          if (i->iov_offset == i->bvec->bv_len) {
 1704                  i->iov_offset = 0;
 1705                  i->nr_segs--;
 1706                  i->bvec++;
 1715          while (bi.bi_size && bi.bi_idx < i->nr_segs) {
 1716                  struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
 1746                  bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
 1749          iov_iter_advance(i, size);

In iov_iter_extract_kvec_pages():
 1757  static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
 1765          size_t skip = i->iov_offset, offset, len, size;
 1769                  if (i->nr_segs == 0)
 1771                  size = min(maxsize, i->kvec->iov_len - skip);
 1774                  i->iov_offset = 0;
 1775                  i->nr_segs--;
 1776                  i->kvec++;
 1780          kaddr = i->kvec->iov_base + skip;
 1805          iov_iter_advance(i, size);

In iov_iter_extract_user_pages():
 1821  static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
 1833          if (i->data_source == ITER_DEST)
 1837          if (i->nofault)
 1840          addr = first_iovec_segment(i, &maxsize);
 1850          iov_iter_advance(i, maxsize);

In iov_iter_extract_pages():
 1897  ssize_t iov_iter_extract_pages(struct iov_iter *i,
 1904          maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
 1908          if (likely(user_backed_iter(i)))
 1909                  return iov_iter_extract_user_pages(i, pages, maxsize,
 1912          if (iov_iter_is_kvec(i))
 1913                  return iov_iter_extract_kvec_pages(i, pages, maxsize,
 1916          if (iov_iter_is_bvec(i))
 1917                  return iov_iter_extract_bvec_pages(i, pages, maxsize,
 1920          if (iov_iter_is_folioq(i))
 1921                  return iov_iter_extract_folioq_pages(i, pages, maxsize,
 1924          if (iov_iter_is_xarray(i))
 1925                  return iov_iter_extract_xarray_pages(i, pages, maxsize,
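Unlike iov_iter_get_pages2(), the extraction interface distinguishes how each backing store is held: user-backed iterators hand back pins (FOLL_PIN), while kvec/bvec/folioq/xarray pages come back with no reference taken, since the caller's iterator already guarantees their lifetime. A sketch of the cleanup contract, where len is assumed context; passing a NULL array lets the helper allocate one:

        struct page **pages = NULL;
        size_t off;
        ssize_t n;
        unsigned int p;

        n = iov_iter_extract_pages(iter, &pages, len, INT_MAX, 0, &off);
        if (n <= 0)
                return n;
        /* ... do the I/O against pages[], offset 'off' in pages[0] ... */
        if (iov_iter_extract_will_pin(iter))
                for (p = 0; p < DIV_ROUND_UP(off + n, PAGE_SIZE); p++)
                        unpin_user_page(pages[p]);
        kvfree(pages);  /* the array itself came from kvmalloc */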