Lines matching refs: wreq

Identifier cross-reference for 'wreq', the write-request pointer used throughout the netfs write-issue code (fs/netfs/write_issue.c). Each entry gives the source line number, the matching source line, the enclosing function, and whether wreq is a local variable or a function argument there.

95 struct netfs_io_request *wreq; in netfs_create_write_req() local
102 wreq = netfs_alloc_request(mapping, file, start, 0, origin); in netfs_create_write_req()
103 if (IS_ERR(wreq)) in netfs_create_write_req()
104 return wreq; in netfs_create_write_req()
106 _enter("R=%x", wreq->debug_id); in netfs_create_write_req()
108 ictx = netfs_inode(wreq->inode); in netfs_create_write_req()
110 fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx)); in netfs_create_write_req()
111 if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0) in netfs_create_write_req()
114 wreq->cleaned_to = wreq->start; in netfs_create_write_req()
116 wreq->io_streams[0].stream_nr = 0; in netfs_create_write_req()
117 wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER; in netfs_create_write_req()
118 wreq->io_streams[0].prepare_write = ictx->ops->prepare_write; in netfs_create_write_req()
119 wreq->io_streams[0].issue_write = ictx->ops->issue_write; in netfs_create_write_req()
120 wreq->io_streams[0].collected_to = start; in netfs_create_write_req()
121 wreq->io_streams[0].transferred = 0; in netfs_create_write_req()
123 wreq->io_streams[1].stream_nr = 1; in netfs_create_write_req()
124 wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE; in netfs_create_write_req()
125 wreq->io_streams[1].collected_to = start; in netfs_create_write_req()
126 wreq->io_streams[1].transferred = 0; in netfs_create_write_req()
127 if (fscache_resources_valid(&wreq->cache_resources)) { in netfs_create_write_req()
128 wreq->io_streams[1].avail = true; in netfs_create_write_req()
129 wreq->io_streams[1].active = true; in netfs_create_write_req()
130 wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq; in netfs_create_write_req()
131 wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write; in netfs_create_write_req()
134 return wreq; in netfs_create_write_req()
136 wreq->error = -ENOMEM; in netfs_create_write_req()
137 netfs_put_request(wreq, netfs_rreq_trace_put_failed); in netfs_create_write_req()
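
The block above is netfs_create_write_req(): it allocates a write request and configures its two I/O streams, with stream 0 uploading to the server through the filesystem's prepare_write/issue_write hooks and stream 1 writing to the local cache through the cache resource ops, but only when fscache actually granted resources. Condensed from the matched lines (rolling-buffer setup, tracing and the -ENOMEM unwind are elided):

	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
	if (IS_ERR(wreq))
		return wreq;

	ictx = netfs_inode(wreq->inode);
	fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));

	/* Stream 0: upload to the server via the filesystem's write hooks. */
	wreq->io_streams[0].source        = NETFS_UPLOAD_TO_SERVER;
	wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
	wreq->io_streams[0].issue_write   = ictx->ops->issue_write;

	/* Stream 1: write to the cache, but only if cache resources are valid. */
	wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
	if (fscache_resources_valid(&wreq->cache_resources)) {
		wreq->io_streams[1].avail         = true;
		wreq->io_streams[1].active        = true;
		wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
		wreq->io_streams[1].issue_write   = wreq->cache_resources.ops->issue_write;
	}

	return wreq;
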
158 static void netfs_prepare_write(struct netfs_io_request *wreq, in netfs_prepare_write() argument
163 struct iov_iter *wreq_iter = &wreq->buffer.iter; in netfs_prepare_write()
171 rolling_buffer_make_space(&wreq->buffer); in netfs_prepare_write()
173 subreq = netfs_alloc_subrequest(wreq); in netfs_prepare_write()
179 _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index); in netfs_prepare_write()
188 stream->sreq_max_len = wreq->wsize; in netfs_prepare_write()
207 spin_lock(&wreq->lock); in netfs_prepare_write()
218 spin_unlock(&wreq->lock); in netfs_prepare_write()
231 struct netfs_io_request *wreq = subreq->rreq; in netfs_do_issue_write() local
233 _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len); in netfs_do_issue_write()
260 void netfs_issue_write(struct netfs_io_request *wreq, in netfs_issue_write() argument
278 size_t netfs_advance_write(struct netfs_io_request *wreq, in netfs_advance_write() argument
290 _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0); in netfs_advance_write()
293 netfs_issue_write(wreq, stream); in netfs_advance_write()
298 netfs_prepare_write(wreq, stream, start); in netfs_advance_write()
310 netfs_issue_write(wreq, stream); in netfs_advance_write()
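
This block is the subrequest machinery. netfs_prepare_write() starts a new subrequest on a stream, netfs_do_issue_write() and netfs_issue_write() push the subrequest under construction out through the stream's issue_write hook, and netfs_advance_write() ties them together: it flushes the pending subrequest when the new range is not contiguous (line 293), prepares a fresh one when the stream has none (line 298) and issues it once it is full (line 310). A condensed sketch of netfs_prepare_write() built from the visible lines, with the queueing done under wreq->lock reduced to a comment:

	static void netfs_prepare_write(struct netfs_io_request *wreq,
					struct netfs_io_stream *stream,
					loff_t start)
	{
		struct netfs_io_subrequest *subreq;

		/* Make sure the rolling buffer has room before attaching data. */
		rolling_buffer_make_space(&wreq->buffer);

		subreq = netfs_alloc_subrequest(wreq);
		_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);

		/* A single subrequest may carry at most the request's write size. */
		stream->sreq_max_len = wreq->wsize;

		spin_lock(&wreq->lock);
		/* ... queue the subrequest on the stream ... */
		spin_unlock(&wreq->lock);
	}
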
320 static int netfs_write_folio(struct netfs_io_request *wreq, in netfs_write_folio() argument
324 struct netfs_io_stream *upload = &wreq->io_streams[0]; in netfs_write_folio()
325 struct netfs_io_stream *cache = &wreq->io_streams[1]; in netfs_write_folio()
337 if (rolling_buffer_make_space(&wreq->buffer) < 0) in netfs_write_folio()
344 i_size = i_size_read(wreq->inode); in netfs_write_folio()
351 wreq->nr_group_rel += netfs_folio_written_back(folio); in netfs_write_folio()
352 netfs_put_group_many(wreq->group, wreq->nr_group_rel); in netfs_write_folio()
353 wreq->nr_group_rel = 0; in netfs_write_folio()
357 if (fpos + fsize > wreq->i_size) in netfs_write_folio()
358 wreq->i_size = i_size; in netfs_write_folio()
368 if (wreq->origin == NETFS_WRITETHROUGH) { in netfs_write_folio()
401 netfs_issue_write(wreq, upload); in netfs_write_folio()
402 } else if (fgroup != wreq->group) { in netfs_write_folio()
407 netfs_issue_write(wreq, upload); in netfs_write_folio()
408 netfs_issue_write(wreq, cache); in netfs_write_folio()
413 netfs_issue_write(wreq, upload); in netfs_write_folio()
415 netfs_issue_write(wreq, cache); in netfs_write_folio()
421 if (wreq->origin == NETFS_WRITEBACK) in netfs_write_folio()
428 netfs_issue_write(wreq, upload); in netfs_write_folio()
444 rolling_buffer_append(&wreq->buffer, folio, 0); in netfs_write_folio()
454 stream = &wreq->io_streams[s]; in netfs_write_folio()
478 stream = &wreq->io_streams[s]; in netfs_write_folio()
488 stream = &wreq->io_streams[choose_s]; in netfs_write_folio()
492 rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off); in netfs_write_folio()
496 atomic64_set(&wreq->issued_to, fpos + stream->submit_off); in netfs_write_folio()
498 part = netfs_advance_write(wreq, stream, fpos + stream->submit_off, in netfs_write_folio()
510 rolling_buffer_advance(&wreq->buffer, fsize - iter_off); in netfs_write_folio()
511 atomic64_set(&wreq->issued_to, fpos + fsize); in netfs_write_folio()
514 kdebug("R=%x: No submit", wreq->debug_id); in netfs_write_folio()
518 netfs_issue_write(wreq, &wreq->io_streams[s]); in netfs_write_folio()
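
netfs_write_folio() is the per-folio engine shared by the writeback and writethrough paths. It trims the folio against i_size, flushes the streams when the folio turns out to be clean or belongs to a different write group (lines 401-428), appends the folio to the rolling buffer (line 444) and then splits the folio between whichever streams want it. The loop at lines 478-511 always submits to the stream with the lowest unsubmitted offset and skips the buffer iterator forward to match. A hedged sketch of that inner loop follows; choose_s, iter_off, submit_off and issued_to all appear in the lines above, while submit_len and the final to_eof argument are assumptions about fields and parameters not shown here:

	/* Split the folio between the active streams, always feeding the
	 * stream with the lowest unsubmitted offset first. */
	for (;;) {
		struct netfs_io_stream *stream;
		size_t part;

		/* Pick choose_s = the active stream with the lowest submit_off;
		 * stop once no stream has anything left to submit (elided). */
		stream = &wreq->io_streams[choose_s];

		/* Move the rolling-buffer iterator up to that offset. */
		if (stream->submit_off > iter_off) {
			rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
			iter_off = stream->submit_off;
		}

		/* Record how far issuing has reached and hand the chunk over. */
		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
					   stream->submit_len, to_eof);
		stream->submit_off += part;
		stream->submit_len -= part;
	}
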
527 static void netfs_end_issue_write(struct netfs_io_request *wreq) in netfs_end_issue_write() argument
532 set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); in netfs_end_issue_write()
535 struct netfs_io_stream *stream = &wreq->io_streams[s]; in netfs_end_issue_write()
541 netfs_issue_write(wreq, stream); in netfs_end_issue_write()
545 netfs_wake_collector(wreq); in netfs_end_issue_write()
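
netfs_end_issue_write() closes out issuing: it marks the request as fully queued, flushes whatever each active stream still has under construction, and wakes the collector that gathers subrequest results. A slightly simplified condensation of lines 532-545:

	static void netfs_end_issue_write(struct netfs_io_request *wreq)
	{
		/* No further subrequests will be added after this point. */
		set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

		/* Flush anything still under construction on each stream. */
		for (int s = 0; s < NR_IO_STREAMS; s++) {
			struct netfs_io_stream *stream = &wreq->io_streams[s];

			if (stream->active)
				netfs_issue_write(wreq, stream);
		}

		netfs_wake_collector(wreq);
	}
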
555 struct netfs_io_request *wreq = NULL; in netfs_writepages() local
573 wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK); in netfs_writepages()
574 if (IS_ERR(wreq)) { in netfs_writepages()
575 error = PTR_ERR(wreq); in netfs_writepages()
579 __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags); in netfs_writepages()
580 trace_netfs_write(wreq, netfs_write_trace_writeback); in netfs_writepages()
584 _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to)); in netfs_writepages()
587 WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to)); in netfs_writepages()
590 unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) { in netfs_writepages()
591 set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); in netfs_writepages()
592 wreq->netfs_ops->begin_writeback(wreq); in netfs_writepages()
595 error = netfs_write_folio(wreq, wbc, folio); in netfs_writepages()
600 netfs_end_issue_write(wreq); in netfs_writepages()
603 netfs_wake_collector(wreq); in netfs_writepages()
605 netfs_put_request(wreq, netfs_rreq_trace_put_return); in netfs_writepages()
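
netfs_writepages() is the library's ->writepages implementation: it creates a NETFS_WRITEBACK request for the first dirty folio (line 573), offloads result collection to a worker (line 579), calls the filesystem's begin_writeback hook once uploading is needed (lines 590-592) and then feeds each dirty folio to netfs_write_folio() before ending issue. A network filesystem mostly just wires it up. A minimal sketch of that wiring for a hypothetical 'myfs'; netfs_writepages, begin_writeback, prepare_write and issue_write are real names (they appear in the lines above or in <linux/netfs.h>), while everything prefixed myfs_ is illustrative:

	#include <linux/fs.h>
	#include <linux/netfs.h>

	/* Let the netfs library drive writeback of this mapping. */
	static const struct address_space_operations myfs_aops = {
		.writepages	= netfs_writepages,
		/* ... dirty_folio, release_folio etc. elided ... */
	};

	static void myfs_begin_writeback(struct netfs_io_request *wreq)
	{
		/* e.g. derive wreq->wsize from the server's maximum RPC payload. */
	}

	static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
	{
		/* Adjust per-subrequest limits before the subrequest is filled. */
	}

	static void myfs_issue_write(struct netfs_io_subrequest *subreq)
	{
		/* Send subreq->len bytes from subreq->io_iter at subreq->start to
		 * the server, then call netfs_write_subrequest_terminated() with
		 * the result. */
	}

	static const struct netfs_request_ops myfs_req_ops = {
		.begin_writeback	= myfs_begin_writeback,
		.prepare_write		= myfs_prepare_write,
		.issue_write		= myfs_issue_write,
		/* ... init_request, free_request etc. elided ... */
	};
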
623 struct netfs_io_request *wreq = NULL; in netfs_begin_writethrough() local
628 wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, in netfs_begin_writethrough()
630 if (IS_ERR(wreq)) { in netfs_begin_writethrough()
632 return wreq; in netfs_begin_writethrough()
635 wreq->io_streams[0].avail = true; in netfs_begin_writethrough()
636 trace_netfs_write(wreq, netfs_write_trace_writethrough); in netfs_begin_writethrough()
637 return wreq; in netfs_begin_writethrough()
646 int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, in netfs_advance_writethrough() argument
651 wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end); in netfs_advance_writethrough()
660 if (wreq->len == 0) in netfs_advance_writethrough()
667 wreq->len += copied; in netfs_advance_writethrough()
672 return netfs_write_folio(wreq, wbc, folio); in netfs_advance_writethrough()
678 ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, in netfs_end_writethrough() argument
681 struct netfs_inode *ictx = netfs_inode(wreq->inode); in netfs_end_writethrough()
684 _enter("R=%x", wreq->debug_id); in netfs_end_writethrough()
687 netfs_write_folio(wreq, wbc, writethrough_cache); in netfs_end_writethrough()
689 netfs_end_issue_write(wreq); in netfs_end_writethrough()
693 if (wreq->iocb) in netfs_end_writethrough()
696 ret = netfs_wait_for_write(wreq); in netfs_end_writethrough()
697 netfs_put_request(wreq, netfs_rreq_trace_put_return); in netfs_end_writethrough()
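
The three writethrough functions above are not called by the VM but by the generic buffered-write path: netfs_begin_writethrough() creates a NETFS_WRITETHROUGH request with only the upload stream enabled (line 635), netfs_advance_writethrough() accounts each chunk copied into the pagecache and pushes completed folios through netfs_write_folio() (line 672), and netfs_end_writethrough() flushes the last folio, ends issuing and checks for an async iocb before deciding whether to wait (lines 693-696). In the upstream tree this sequence lives inside netfs_perform_write(); filesystems normally reach it via netfs_file_write_iter(). A hedged sketch of the calling sequence, with the folio copy loop reduced to comments; the extra netfs_advance_writethrough() arguments (folio, copied, to_page_end and a cached-folio pointer) are inferred from lines 651 and 672 and should be treated as assumptions:

	#include <linux/netfs.h>
	#include <linux/writeback.h>

	/* Sketch only: every name except the netfs_* calls is illustrative. */
	static ssize_t myfs_writethrough_sketch(struct kiocb *iocb, struct iov_iter *iter)
	{
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.nr_to_write	= LONG_MAX,
		};
		struct folio *writethrough = NULL;
		struct netfs_io_request *wreq;

		wreq = netfs_begin_writethrough(iocb, iov_iter_count(iter));
		if (IS_ERR(wreq))
			return PTR_ERR(wreq);

		/* Copy loop elided: for each folio filled from *iter, report how
		 * much was copied and whether the copy reached the end of the
		 * folio so that completed folios can be flushed as we go:
		 *
		 *	netfs_advance_writethrough(wreq, &wbc, folio, copied,
		 *				   to_page_end, &writethrough);
		 */

		/* Flush the final partial folio, stop issuing and collect the
		 * result (or hand it to the iocb for an async write). */
		return netfs_end_writethrough(wreq, &wbc, writethrough);
	}
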
705 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len) in netfs_unbuffered_write() argument
707 struct netfs_io_stream *upload = &wreq->io_streams[0]; in netfs_unbuffered_write()
709 loff_t start = wreq->start; in netfs_unbuffered_write()
714 if (wreq->origin == NETFS_DIO_WRITE) in netfs_unbuffered_write()
715 inode_dio_begin(wreq->inode); in netfs_unbuffered_write()
721 part = netfs_advance_write(wreq, upload, start, len, false); in netfs_unbuffered_write()
724 rolling_buffer_advance(&wreq->buffer, part); in netfs_unbuffered_write()
725 if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) in netfs_unbuffered_write()
726 netfs_wait_for_paused_write(wreq); in netfs_unbuffered_write()
727 if (test_bit(NETFS_RREQ_FAILED, &wreq->flags)) in netfs_unbuffered_write()
731 netfs_end_issue_write(wreq); in netfs_unbuffered_write()
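
netfs_unbuffered_write() is the direct-I/O issuer: it loops handing the next chunk of the request to the upload stream via netfs_advance_write(), advances the rolling buffer by however much was accepted, and honours pause and failure signals from the collector before ending issue. Condensed from the matched lines:

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_begin(wreq->inode);

	while (len) {
		/* Hand the next chunk to the upload stream; 'part' is how much
		 * actually went into a subrequest this time round. */
		part = netfs_advance_write(wreq, upload, start, len, false);
		start += part;
		len -= part;
		rolling_buffer_advance(&wreq->buffer, part);

		/* Back off if the collector paused us; stop on failure. */
		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
			netfs_wait_for_paused_write(wreq);
		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
			break;
	}

	netfs_end_issue_write(wreq);
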
739 static int netfs_write_folio_single(struct netfs_io_request *wreq, in netfs_write_folio_single() argument
742 struct netfs_io_stream *upload = &wreq->io_streams[0]; in netfs_write_folio_single()
743 struct netfs_io_stream *cache = &wreq->io_streams[1]; in netfs_write_folio_single()
754 if (flen > wreq->i_size - fpos) { in netfs_write_folio_single()
755 flen = wreq->i_size - fpos; in netfs_write_folio_single()
758 } else if (flen == wreq->i_size - fpos) { in netfs_write_folio_single()
776 rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK); in netfs_write_folio_single()
786 stream = &wreq->io_streams[s]; in netfs_write_folio_single()
807 stream = &wreq->io_streams[s]; in netfs_write_folio_single()
817 stream = &wreq->io_streams[choose_s]; in netfs_write_folio_single()
821 rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off); in netfs_write_folio_single()
825 atomic64_set(&wreq->issued_to, fpos + stream->submit_off); in netfs_write_folio_single()
827 part = netfs_advance_write(wreq, stream, fpos + stream->submit_off, in netfs_write_folio_single()
838 wreq->buffer.iter.iov_offset = 0; in netfs_write_folio_single()
840 rolling_buffer_advance(&wreq->buffer, fsize - iter_off); in netfs_write_folio_single()
841 atomic64_set(&wreq->issued_to, fpos + fsize); in netfs_write_folio_single()
844 kdebug("R=%x: No submit", wreq->debug_id); in netfs_write_folio_single()
862 struct netfs_io_request *wreq; in netfs_writeback_single() local
880 wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE); in netfs_writeback_single()
881 if (IS_ERR(wreq)) { in netfs_writeback_single()
882 ret = PTR_ERR(wreq); in netfs_writeback_single()
886 __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags); in netfs_writeback_single()
887 trace_netfs_write(wreq, netfs_write_trace_writeback_single); in netfs_writeback_single()
890 if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags)) in netfs_writeback_single()
891 wreq->netfs_ops->begin_writeback(wreq); in netfs_writeback_single()
898 _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to)); in netfs_writeback_single()
900 ret = netfs_write_folio_single(wreq, folio); in netfs_writeback_single()
911 netfs_issue_write(wreq, &wreq->io_streams[s]); in netfs_writeback_single()
913 set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); in netfs_writeback_single()
916 netfs_wake_collector(wreq); in netfs_writeback_single()
918 netfs_put_request(wreq, netfs_rreq_trace_put_return); in netfs_writeback_single()
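
The last two functions implement the NETFS_WRITEBACK_SINGLE origin, used when an object is written back as one monolithic buffer rather than through the normal per-folio writeback path: netfs_write_folio_single() is a trimmed-down netfs_write_folio() that clamps the folio against wreq->i_size (lines 754-758) and appends it to the rolling buffer with a put-mark (line 776), while netfs_writeback_single() builds the request, calls the filesystem's begin_writeback hook and pushes every folio of the object through netfs_write_folio_single() before closing out both streams. Condensed from the matched lines of netfs_writeback_single(), with the folio iteration and error handling elided:

	wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE);
	if (IS_ERR(wreq))
		return PTR_ERR(wreq);

	/* Let a worker, not the issuing thread, collect the results. */
	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
	trace_netfs_write(wreq, netfs_write_trace_writeback_single);

	wreq->netfs_ops->begin_writeback(wreq);

	/* For each folio making up the object:
	 *	ret = netfs_write_folio_single(wreq, folio);
	 */

	/* Flush both streams, mark everything queued and kick the collector. */
	for (int s = 0; s < NR_IO_STREAMS; s++)
		netfs_issue_write(wreq, &wreq->io_streams[s]);
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
	netfs_wake_collector(wreq);

	netfs_put_request(wreq, netfs_rreq_trace_put_return);
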