Lines matching refs: rreq
12 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq, in netfs_cache_expand_readahead() argument
17 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_cache_expand_readahead()
23 static void netfs_rreq_expand(struct netfs_io_request *rreq, in netfs_rreq_expand() argument
29 netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size); in netfs_rreq_expand()
34 if (rreq->netfs_ops->expand_readahead) in netfs_rreq_expand()
35 rreq->netfs_ops->expand_readahead(rreq); in netfs_rreq_expand()
46 if (rreq->start != readahead_pos(ractl) || in netfs_rreq_expand()
47 rreq->len != readahead_length(ractl)) { in netfs_rreq_expand()
48 readahead_expand(ractl, rreq->start, rreq->len); in netfs_rreq_expand()
49 rreq->start = readahead_pos(ractl); in netfs_rreq_expand()
50 rreq->len = readahead_length(ractl); in netfs_rreq_expand()
52 trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl), in netfs_rreq_expand()
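
The references above give the whole shape of netfs_rreq_expand(): the span is first rounded out to the cache's granularity, the network filesystem then gets a chance to widen it further, and finally the VM readahead window is grown to match, with rreq->start/len re-read from the ractl afterwards. A minimal sketch of that flow, reconstructed from the listed lines only; the trace tag (written here as netfs_read_trace_expanded) is cut off in the listing and is an assumption:

static void netfs_rreq_expand(struct netfs_io_request *rreq,
                              struct readahead_control *ractl)
{
        /* Let the cache round the span out to its block size. */
        netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

        /* Let the network filesystem widen it further (e.g. to its rsize). */
        if (rreq->netfs_ops->expand_readahead)
                rreq->netfs_ops->expand_readahead(rreq);

        /* Grow the pagecache window to cover whatever was decided on. */
        if (rreq->start != readahead_pos(ractl) ||
            rreq->len != readahead_length(ractl)) {
                readahead_expand(ractl, rreq->start, rreq->len);
                rreq->start = readahead_pos(ractl);
                rreq->len = readahead_length(ractl);
        }

        trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                         netfs_read_trace_expanded);
}
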
61 static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx) in netfs_begin_cache_read() argument
63 return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx)); in netfs_begin_cache_read()
84 struct netfs_io_request *rreq = subreq->rreq; in netfs_prepare_read_iterator() local
88 rsize = umin(rsize, rreq->io_streams[0].sreq_max_len); in netfs_prepare_read_iterator()
100 while (rreq->submitted < subreq->start + rsize) { in netfs_prepare_read_iterator()
103 added = rolling_buffer_load_from_ra(&rreq->buffer, ractl, in netfs_prepare_read_iterator()
107 rreq->submitted += added; in netfs_prepare_read_iterator()
113 if (unlikely(rreq->io_streams[0].sreq_max_segs)) { in netfs_prepare_read_iterator()
114 size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize, in netfs_prepare_read_iterator()
115 rreq->io_streams[0].sreq_max_segs); in netfs_prepare_read_iterator()
123 subreq->io_iter = rreq->buffer.iter; in netfs_prepare_read_iterator()
126 rolling_buffer_advance(&rreq->buffer, subreq->len); in netfs_prepare_read_iterator()
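
netfs_prepare_read_iterator() sizes a subrequest and points its iterator at the rolling buffer: the length is capped by the stream's sreq_max_len, folios are pulled from the readahead window into the rolling buffer until rreq->submitted covers the subrequest, the result may be trimmed again by sreq_max_segs, and the buffer is then snapshotted into subreq->io_iter and advanced. A hedged reconstruction; the ractl parameter, the folio_batch handling, the iov_iter_truncate() call and the return value are assumptions, only the remaining calls appear in the listing:

static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq,
                                           struct readahead_control *ractl)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct folio_batch put_batch;
        size_t rsize = subreq->len;

        rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);

        /* Pull folios out of the readahead window until the rolling
         * buffer covers this subrequest. */
        folio_batch_init(&put_batch);
        while (rreq->submitted < subreq->start + rsize) {
                ssize_t added = rolling_buffer_load_from_ra(&rreq->buffer, ractl,
                                                            &put_batch);
                if (added < 0)
                        return added;
                rreq->submitted += added;
        }
        folio_batch_release(&put_batch);

        /* Honour a transport-imposed cap on the number of segments. */
        if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
                size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
                                                rreq->io_streams[0].sreq_max_segs);
                rsize = umin(rsize, limit);
        }

        subreq->len = rsize;
        subreq->io_iter = rreq->buffer.iter;
        iov_iter_truncate(&subreq->io_iter, rsize);
        rolling_buffer_advance(&rreq->buffer, subreq->len);
        return subreq->len;
}
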
130 static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rreq, in netfs_cache_prepare_read() argument
134 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_cache_prepare_read()
149 static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq, in netfs_read_cache_to_pagecache() argument
152 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_read_cache_to_pagecache()
159 static void netfs_queue_read(struct netfs_io_request *rreq, in netfs_queue_read() argument
163 struct netfs_io_stream *stream = &rreq->io_streams[0]; in netfs_queue_read()
171 spin_lock(&rreq->lock); in netfs_queue_read()
184 set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); in netfs_queue_read()
187 spin_unlock(&rreq->lock); in netfs_queue_read()
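
netfs_queue_read() appears to take rreq->lock, attach the prepared subrequest to read stream 0, and set NETFS_RREQ_ALL_QUEUED when told this is the last subrequest (the call site at line 309 passes "size <= 0" for that). A minimal sketch under those assumptions; the list-head field names and any per-stream accounting are not shown in the listing:

static void netfs_queue_read(struct netfs_io_request *rreq,
                             struct netfs_io_subrequest *subreq,
                             bool last_subreq)
{
        struct netfs_io_stream *stream = &rreq->io_streams[0];

        spin_lock(&rreq->lock);
        list_add_tail(&subreq->rreq_link, &stream->subrequests);
        if (last_subreq)
                set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
        spin_unlock(&rreq->lock);
}
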
190 static void netfs_issue_read(struct netfs_io_request *rreq, in netfs_issue_read() argument
195 rreq->netfs_ops->issue_read(subreq); in netfs_issue_read()
198 netfs_read_cache_to_pagecache(rreq, subreq); in netfs_issue_read()
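
netfs_issue_read() dispatches a prepared subrequest: downloads go to the filesystem's ->issue_read() method, while data the cache said it holds is read into the pagecache via netfs_read_cache_to_pagecache(). The switch below is only a hedged guess at how the two listed calls are selected; the subreq->source field and the enum values used here are assumptions, not shown in the listing:

        switch (subreq->source) {
        case NETFS_DOWNLOAD_FROM_SERVER:
                rreq->netfs_ops->issue_read(subreq);
                break;
        case NETFS_READ_FROM_CACHE:
                netfs_read_cache_to_pagecache(rreq, subreq);
                break;
        default:
                break;
        }
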
215 static void netfs_read_to_pagecache(struct netfs_io_request *rreq, in netfs_read_to_pagecache() argument
218 struct netfs_inode *ictx = netfs_inode(rreq->inode); in netfs_read_to_pagecache()
219 unsigned long long start = rreq->start; in netfs_read_to_pagecache()
220 ssize_t size = rreq->len; in netfs_read_to_pagecache()
228 subreq = netfs_alloc_subrequest(rreq); in netfs_read_to_pagecache()
237 source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size); in netfs_read_to_pagecache()
240 unsigned long long zp = umin(ictx->zero_point, rreq->i_size); in netfs_read_to_pagecache()
243 if (unlikely(rreq->origin == NETFS_READ_SINGLE)) in netfs_read_to_pagecache()
244 zp = rreq->i_size; in netfs_read_to_pagecache()
254 rreq->debug_id, subreq->debug_index, in netfs_read_to_pagecache()
256 subreq->start, ictx->zero_point, rreq->i_size); in netfs_read_to_pagecache()
262 if (rreq->netfs_ops->prepare_read) { in netfs_read_to_pagecache()
263 ret = rreq->netfs_ops->prepare_read(subreq); in netfs_read_to_pagecache()
309 netfs_queue_read(rreq, subreq, size <= 0); in netfs_read_to_pagecache()
310 netfs_issue_read(rreq, subreq); in netfs_read_to_pagecache()
316 set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); in netfs_read_to_pagecache()
317 netfs_wake_collector(rreq); in netfs_read_to_pagecache()
321 cmpxchg(&rreq->error, 0, ret); in netfs_read_to_pagecache()
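
Taken together, these references outline the main slicing loop of netfs_read_to_pagecache(): for each chunk a subrequest is allocated, the cache is asked where the data should come from, the inode's zero_point is consulted (pinned to i_size for NETFS_READ_SINGLE), the filesystem's prepare_read() hook may trim the slice, and the subrequest is then queued and issued; once the span is exhausted, ALL_QUEUED is set, the collector is woken, and any error is folded into rreq->error with cmpxchg(). A condensed sketch of that loop as a fragment of the function (it assumes the start/size/ictx locals listed at lines 218-220; the loop control, the NETFS_DOWNLOAD_FROM_SERVER case label and the error path placement are assumptions):

        do {
                struct netfs_io_subrequest *subreq = netfs_alloc_subrequest(rreq);
                enum netfs_io_source source;
                int ret;

                subreq->start = start;
                subreq->len   = size;

                source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
                if (source == NETFS_DOWNLOAD_FROM_SERVER) {
                        unsigned long long zp = umin(ictx->zero_point, rreq->i_size);

                        /* Single-blob reads ignore the zero point. */
                        if (unlikely(rreq->origin == NETFS_READ_SINGLE))
                                zp = rreq->i_size;
                        /* ... clamp the subrequest against zp (not shown above) ... */
                }

                if (rreq->netfs_ops->prepare_read) {
                        ret = rreq->netfs_ops->prepare_read(subreq);
                        if (ret < 0) {
                                cmpxchg(&rreq->error, 0, ret);
                                break;
                        }
                }

                start += subreq->len;
                size  -= subreq->len;
                netfs_queue_read(rreq, subreq, size <= 0);
                netfs_issue_read(rreq, subreq);
        } while (size > 0);

        set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
        netfs_wake_collector(rreq);
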
341 struct netfs_io_request *rreq; in netfs_readahead() local
347 rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size, in netfs_readahead()
349 if (IS_ERR(rreq)) in netfs_readahead()
352 __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags); in netfs_readahead()
354 ret = netfs_begin_cache_read(rreq, ictx); in netfs_readahead()
359 trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl), in netfs_readahead()
362 netfs_rreq_expand(rreq, ractl); in netfs_readahead()
364 rreq->submitted = rreq->start; in netfs_readahead()
365 if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) in netfs_readahead()
367 netfs_read_to_pagecache(rreq, ractl); in netfs_readahead()
369 return netfs_put_request(rreq, netfs_rreq_trace_put_return); in netfs_readahead()
372 return netfs_put_request(rreq, netfs_rreq_trace_put_failed); in netfs_readahead()
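
The netfs_readahead() references show the entry path for VM readahead: allocate a request over the ractl window, flag it for offloaded collection, start a cache read operation, expand the window, set up the rolling buffer, then drive netfs_read_to_pagecache(); the request reference is dropped on both the success and failure paths. A sketch of that sequence reconstructed from the listed lines; the NETFS_READAHEAD origin, the netfs_read_trace_readahead tag, the label names and the simplified error checks are assumptions:

void netfs_readahead(struct readahead_control *ractl)
{
        struct netfs_inode *ictx = netfs_inode(ractl->mapping->host);
        unsigned long long start = readahead_pos(ractl);
        size_t size = readahead_length(ractl);
        struct netfs_io_request *rreq;

        rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size,
                                   NETFS_READAHEAD);
        if (IS_ERR(rreq))
                return;

        __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
        if (netfs_begin_cache_read(rreq, ictx) < 0)
                goto cleanup_free;

        trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                         netfs_read_trace_readahead);
        netfs_rreq_expand(rreq, ractl);

        rreq->submitted = rreq->start;
        if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
                goto cleanup_free;
        netfs_read_to_pagecache(rreq, ractl);
        netfs_put_request(rreq, netfs_rreq_trace_put_return);
        return;

cleanup_free:
        netfs_put_request(rreq, netfs_rreq_trace_put_failed);
}
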
379 static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio, in netfs_create_singular_buffer() argument
384 if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) in netfs_create_singular_buffer()
387 added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags); in netfs_create_singular_buffer()
390 rreq->submitted = rreq->start + added; in netfs_create_singular_buffer()
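
netfs_create_singular_buffer() wraps a single target folio in a one-slot rolling buffer and records it as already submitted, so the same read engine can be used for single-folio reads. A short sketch; the rollbuf_flags parameter name comes from line 387, while the return values are assumptions:

static int netfs_create_singular_buffer(struct netfs_io_request *rreq,
                                        struct folio *folio,
                                        unsigned int rollbuf_flags)
{
        ssize_t added;

        if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
                return -ENOMEM;

        added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);
        if (added < 0)
                return added;
        rreq->submitted = rreq->start + added;
        return 0;
}

netfs_read_folio(), netfs_write_begin() and netfs_prefetch_for_write() below all go through this helper.
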
399 struct netfs_io_request *rreq; in netfs_read_gaps() local
415 rreq = netfs_alloc_request(mapping, file, folio_pos(folio), flen, NETFS_READ_GAPS); in netfs_read_gaps()
416 if (IS_ERR(rreq)) { in netfs_read_gaps()
417 ret = PTR_ERR(rreq); in netfs_read_gaps()
421 ret = netfs_begin_cache_read(rreq, ctx); in netfs_read_gaps()
426 trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_read_gaps); in netfs_read_gaps()
444 rreq->direct_bv = bvec; in netfs_read_gaps()
445 rreq->direct_bv_count = nr_bvec; in netfs_read_gaps()
457 iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len); in netfs_read_gaps()
458 rreq->submitted = rreq->start + flen; in netfs_read_gaps()
460 netfs_read_to_pagecache(rreq, NULL); in netfs_read_gaps()
465 ret = netfs_wait_for_read(rreq); in netfs_read_gaps()
471 netfs_put_request(rreq, netfs_rreq_trace_put_return); in netfs_read_gaps()
475 netfs_put_request(rreq, netfs_rreq_trace_put_discard); in netfs_read_gaps()
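
netfs_read_gaps() handles a folio that is only partly populated: it builds a NETFS_READ_GAPS request covering the folio, hands the request a bio_vec array describing the folio (via ->direct_bv, so the array is freed with the request), points the buffer iterator at those bvecs, marks the whole folio as already submitted, and then runs the normal read engine with no ractl before waiting for completion. A fragment reconstructed from the listed lines; how the bvec array is sized and filled is not shown above and is only indicated by a comment, and the label names and error checks are simplified assumptions:

        rreq = netfs_alloc_request(mapping, file, folio_pos(folio), flen,
                                   NETFS_READ_GAPS);
        if (IS_ERR(rreq)) {
                ret = PTR_ERR(rreq);
                goto unlock;
        }

        ret = netfs_begin_cache_read(rreq, ctx);
        if (ret < 0)
                goto discard;

        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_read_gaps);

        /* ... allocate and fill a bio_vec array (bvec[0..i-1], nr_bvec slots)
         * describing the folio; not shown in the listing ... */
        rreq->direct_bv = bvec;         /* freed along with the request */
        rreq->direct_bv_count = nr_bvec;
        iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
        rreq->submitted = rreq->start + flen;

        netfs_read_to_pagecache(rreq, NULL);
        ret = netfs_wait_for_read(rreq);
        netfs_put_request(rreq, netfs_rreq_trace_put_return);
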
498 struct netfs_io_request *rreq; in netfs_read_folio() local
509 rreq = netfs_alloc_request(mapping, file, in netfs_read_folio()
512 if (IS_ERR(rreq)) { in netfs_read_folio()
513 ret = PTR_ERR(rreq); in netfs_read_folio()
517 ret = netfs_begin_cache_read(rreq, ctx); in netfs_read_folio()
522 trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); in netfs_read_folio()
525 ret = netfs_create_singular_buffer(rreq, folio, 0); in netfs_read_folio()
529 netfs_read_to_pagecache(rreq, NULL); in netfs_read_folio()
530 ret = netfs_wait_for_read(rreq); in netfs_read_folio()
531 netfs_put_request(rreq, netfs_rreq_trace_put_return); in netfs_read_folio()
535 netfs_put_request(rreq, netfs_rreq_trace_put_discard); in netfs_read_folio()
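
netfs_read_folio() is the simple single-folio pattern: allocate a request spanning the folio, begin the cache read, wrap the folio in a one-slot rolling buffer, run the read engine, wait for it, and drop the request. A sketch reconstructed from the listed lines; the NETFS_READPAGE origin, the allocation arguments, the local setup and the error labels (including the folio_unlock() on failure) are assumptions:

int netfs_read_folio(struct file *file, struct folio *folio)
{
        struct address_space *mapping = folio->mapping;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        struct netfs_io_request *rreq;
        int ret;

        rreq = netfs_alloc_request(mapping, file,
                                   folio_pos(folio), folio_size(folio),
                                   NETFS_READPAGE);
        if (IS_ERR(rreq)) {
                ret = PTR_ERR(rreq);
                goto unlock;
        }

        ret = netfs_begin_cache_read(rreq, ctx);
        if (ret < 0)
                goto discard;

        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);

        /* Wrap the single target folio in a one-slot rolling buffer. */
        ret = netfs_create_singular_buffer(rreq, folio, 0);
        if (ret < 0)
                goto discard;

        netfs_read_to_pagecache(rreq, NULL);
        ret = netfs_wait_for_read(rreq);
        netfs_put_request(rreq, netfs_rreq_trace_put_return);
        return ret;

discard:
        netfs_put_request(rreq, netfs_rreq_trace_put_discard);
unlock:
        folio_unlock(folio);
        return ret;
}
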
629 struct netfs_io_request *rreq; in netfs_write_begin() local
664 rreq = netfs_alloc_request(mapping, file, in netfs_write_begin()
667 if (IS_ERR(rreq)) { in netfs_write_begin()
668 ret = PTR_ERR(rreq); in netfs_write_begin()
671 rreq->no_unlock_folio = folio->index; in netfs_write_begin()
672 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); in netfs_write_begin()
674 ret = netfs_begin_cache_read(rreq, ctx); in netfs_write_begin()
679 trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin); in netfs_write_begin()
682 ret = netfs_create_singular_buffer(rreq, folio, 0); in netfs_write_begin()
686 netfs_read_to_pagecache(rreq, NULL); in netfs_write_begin()
687 ret = netfs_wait_for_read(rreq); in netfs_write_begin()
690 netfs_put_request(rreq, netfs_rreq_trace_put_return); in netfs_write_begin()
702 netfs_put_request(rreq, netfs_rreq_trace_put_failed); in netfs_write_begin()
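
netfs_write_begin() follows the same single-folio pattern as netfs_read_folio() above, with one addition visible in the listing: because ->write_begin() must hand the folio back to its caller still locked, the request records the folio and sets NETFS_RREQ_NO_UNLOCK_FOLIO so read completion leaves it alone. A fragment limited to that addition; the request origin (written here as NETFS_READ_FOR_WRITE), the allocation arguments and the surrounding error handling are assumptions:

        rreq = netfs_alloc_request(mapping, file, pos, len, NETFS_READ_FOR_WRITE);
        if (IS_ERR(rreq)) {
                ret = PTR_ERR(rreq);
                goto error;
        }

        /* Keep the target folio locked across completion; write_begin's
         * caller still owns it. */
        rreq->no_unlock_folio = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
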
719 struct netfs_io_request *rreq; in netfs_prefetch_for_write() local
730 rreq = netfs_alloc_request(mapping, file, start, flen, in netfs_prefetch_for_write()
732 if (IS_ERR(rreq)) { in netfs_prefetch_for_write()
733 ret = PTR_ERR(rreq); in netfs_prefetch_for_write()
737 rreq->no_unlock_folio = folio->index; in netfs_prefetch_for_write()
738 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); in netfs_prefetch_for_write()
739 ret = netfs_begin_cache_read(rreq, ctx); in netfs_prefetch_for_write()
744 trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write); in netfs_prefetch_for_write()
747 ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK); in netfs_prefetch_for_write()
751 netfs_read_to_pagecache(rreq, NULL); in netfs_prefetch_for_write()
752 ret = netfs_wait_for_read(rreq); in netfs_prefetch_for_write()
753 netfs_put_request(rreq, netfs_rreq_trace_put_return); in netfs_prefetch_for_write()
757 netfs_put_request(rreq, netfs_rreq_trace_put_discard); in netfs_prefetch_for_write()
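
netfs_prefetch_for_write() is the same pattern once more, used to pull a folio in before it is partially overwritten. The two details the listing shows are the NO_UNLOCK flag (as in netfs_write_begin()) and the NETFS_ROLLBUF_PAGECACHE_MARK flag passed when the folio is attached to the rolling buffer. A fragment limited to those lines; the omitted setup is indicated by a comment and the rest is assumed to match the functions above:

        rreq->no_unlock_folio = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

        /* ... cache read begun and traced as in the functions above ... */

        /* Attach the folio with a mark set on its rolling-buffer slot. */
        ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK);
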