// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains VFS address space (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read subrequest to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/*
	 * If we just extended the file size, any portion not in the
	 * cache won't be on the server and is zeroes.
	 */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = file_inode(file);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct p9_fid *fid = file->private_data;

	BUG_ON(!fid);

	/*
	 * We might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache; use the writeback fid
	 * for that.
	 */
	if (rreq->origin == NETFS_READ_FOR_WRITE &&
	    (fid->mode & O_ACCMODE) == O_WRONLY) {
		fid = v9inode->writeback_fid;
		BUG_ON(!fid);
	}

	p9_fid_get(fid);
	rreq->netfs_priv = fid;
	return 0;
}

/**
 * v9fs_free_request - Clean up a request initialised by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
	struct p9_fid *fid = rreq->netfs_priv;

	p9_fid_put(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
	return -ENOBUFS;
#endif
}

const struct netfs_request_ops v9fs_req_ops = {
	.init_request = v9fs_init_request,
	.free_request = v9fs_free_request,
	.begin_cache_operation = v9fs_begin_cache_operation,
	.issue_read = v9fs_issue_read,
};

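/*
 * These ops are not called directly from this file: they are handed to
 * netfslib when the inode's netfs context is initialised elsewhere in
 * fs/9p, roughly along the lines of:
 *
 *	netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
 *
 * after which netfslib invokes v9fs_issue_read() above to fetch data
 * for read_folio/readahead requests.
 */
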
/**
 * v9fs_release_folio - Release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the folio can be released, false otherwise.
 */
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return false;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return true;
}

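/**
 * v9fs_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Offset within the folio of the invalidated region
 * @length: Length of the invalidated region
 *
 * Wait for any in-flight write to the cache to complete before the
 * folio contents are thrown away.
 */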
static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	folio_wait_fscache(folio);
}

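/*
 * Completion handler for a write to the fscache cache: if the write
 * failed for any reason other than -ENOBUFS (no cache available), the
 * copy in the cache may now be stale, so invalidate the cache object
 * against the inode's current qid version.
 */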
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->netfs.inode), 0);
	}
}

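/*
 * Write a single dirty folio back to the server over the writeback
 * fid, clamping the length to the current file size, and also mirror
 * the data into the fscache cache if this inode is being cached.
 * Expects the folio to be locked by the caller.
 */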
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages,
			start, len);

	/* We should always have the writeback fid set at this point */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

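/**
 * v9fs_vfs_writepage - Write back a dirty page
 * @page: The page to write
 * @wbc: Writeback control
 *
 * Flush one page to the server; -EAGAIN causes the page to be
 * redirtied for a later retry instead of being reported as an error.
 */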
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else {
		retval = 0;
	}

	folio_unlock(folio);
	return retval;
}

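/**
 * v9fs_launder_folio - Clean a dirty folio prior to invalidation
 * @folio: The folio to launder
 *
 * Write any dirty data back to the server and wait for an in-flight
 * write to the cache to finish so the folio can safely be dropped.
 */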
static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed
 * otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests
 * before the VFS gets them, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.  Hence, when this
 * routine is called through generic_file_read_iter(), the read/write
 * fails with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

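/**
 * v9fs_write_begin - Prepare to write to a file through the pagecache
 * @filp: The file being written to
 * @mapping: The mapping being written to
 * @pos: The file position at which the write will start
 * @len: The length of the write
 * @subpagep: Where to return the locked page to copy into
 * @fsdata: Opaque data to be passed to v9fs_write_end()
 *
 * Ask netfslib to find or create the folio covering the write and
 * bring it up to date as needed, then return its head page locked.
 */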
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	/*
	 * Prefetch the area to be written into the cache if we're caching
	 * this file.  We need to do this before we get a lock on the page
	 * in case there's more than one writer competing for the same
	 * cache block.
	 */
	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len,
				   &folio, fsdata);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

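/**
 * v9fs_write_end - Commit a write prepared by v9fs_write_begin()
 * @filp: The file being written to
 * @mapping: The mapping being written to
 * @pos: The file position the write started at
 * @len: The length that was prepared
 * @copied: The number of bytes actually copied in
 * @subpage: The page returned by v9fs_write_begin()
 * @fsdata: Opaque data from v9fs_write_begin()
 *
 * Mark the folio up to date and dirty, extend the file size if the
 * write went past EOF, then unlock and release the folio.
 */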
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size cannot change
	 * under us because we hold the inode lock.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL,
				      &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a folio as having been made dirty and thus needing writeback.
 * We also need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif

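/*
 * Address space operations for 9p file mappings.  These are installed
 * on an inode's mapping when the inode is set up, presumably along the
 * lines of:
 *
 *	inode->i_mapping->a_ops = &v9fs_addr_operations;
 *
 * so that the VFS and netfslib dispatch pagecache operations through
 * the handlers above.
 */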
const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = v9fs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.release_folio = v9fs_release_folio,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};