/*
 * Copyright (c) 2025 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/drivers/virtio.h>
#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include "virtiofs.h"

LOG_MODULE_REGISTER(virtiofs, CONFIG_VIRTIOFS_LOG_LEVEL);

/*
 * According to section 5.11.2 of the virtio specification v1.3, the virtiofs queues are
 * indexed as follows:
 * - idx 0 - hiprio
 * - idx 1 - notification queue
 * - idx 2..n - request queues
 * The notification queue is available only if VIRTIO_FS_F_NOTIFICATION is present, and
 * there is no mention that in its absence the request queues are shifted to start at
 * idx 1, so the request queues shall start at idx 2. However, in the case of
 * qemu+virtiofsd, which don't support VIRTIO_FS_F_NOTIFICATION, the last available
 * queue is at idx 1, and virtio_fs_config.num_request_queues states that there is a
 * single request queue present, which must be the one at idx 1.
 */
#ifdef CONFIG_VIRTIOFS_NO_NOTIFICATION_QUEUE_SLOT
#define REQUEST_QUEUE 1
#else
#define REQUEST_QUEUE 2
#endif
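
/*
 * Illustrative layouts (derived from the note above, not from extra spec text): with the
 * notification slot present the device exposes { 0: hiprio, 1: notification, 2: request },
 * while with CONFIG_VIRTIOFS_NO_NOTIFICATION_QUEUE_SLOT (e.g. qemu+virtiofsd) it exposes
 * { 0: hiprio, 1: request }. Either way this driver only sets up and uses REQUEST_QUEUE.
 */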

struct virtio_fs_config {
	char tag[36];
	uint32_t num_request_queues;
};

static int virtiofs_validate_response(
	const struct fuse_out_header *header, uint32_t opcode, uint32_t used_len,
	uint32_t expected_len)
{
	if (used_len < sizeof(*header)) {
		LOG_ERR("used length is smaller than size of fuse_out_header");
		return -EIO;
	}

	if (header->error != 0) {
		LOG_ERR(
			"%s error %d (%s)",
			fuse_opcode_to_string(opcode),
			-header->error,
			strerror(-header->error)
		);
		return header->error;
	}

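	/* expected_len == -1 (i.e. UINT32_MAX after conversion) disables the length check */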
	if (expected_len != -1 && header->len != expected_len) {
		LOG_ERR(
			"%s return message has invalid length (0x%x), expected 0x%x",
			fuse_opcode_to_string(opcode),
			header->len,
			expected_len
		);
		return -EIO;
	}

	return 0;
}

struct recv_cb_param {
	struct k_sem sem;
	uint32_t used_len;
};

void virtiofs_recv_cb(void *opaque, uint32_t used_len)
{
	struct recv_cb_param *arg = opaque;

	arg->used_len = used_len;
	k_sem_give(&arg->sem);
}

static uint32_t virtiofs_send_receive(
	const struct device *dev, uint16_t virtq, struct virtq_buf *bufs,
	uint16_t bufs_size, uint16_t device_readable)
{
	struct virtq *virtqueue = virtio_get_virtqueue(dev, virtq);
	struct recv_cb_param cb_arg;

	k_sem_init(&cb_arg.sem, 0, 1);

	virtq_add_buffer_chain(
		virtqueue, bufs, bufs_size, device_readable, virtiofs_recv_cb, &cb_arg,
		K_FOREVER
	);
	virtio_notify_virtqueue(dev, virtq);

	k_sem_take(&cb_arg.sem, K_FOREVER);

	return cb_arg.used_len;
}
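
/*
 * virtiofs_send_receive() is the synchronous transport used by every request below: the
 * chain starts with `device_readable` device-readable buffers (the FUSE request) followed
 * by device-writable ones (the reply), and the caller sleeps until the device reports how
 * many bytes it wrote back. A minimal sketch of a request/reply exchange (illustrative
 * only, `in` and `out` are hypothetical structures):
 *
 *	struct virtq_buf bufs[] = {
 *		{ .addr = &in, .len = sizeof(in) },	// device-readable
 *		{ .addr = &out, .len = sizeof(out) }	// device-writable
 *	};
 *	uint32_t used = virtiofs_send_receive(dev, REQUEST_QUEUE, bufs, 2, 1);
 *	// `used` is the number of bytes the device wrote into the writable buffers
 */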

static uint16_t virtiofs_queue_enum_cb(uint16_t queue_idx, uint16_t max_size, void *unused)
{
	if (queue_idx == REQUEST_QUEUE) {
		return MIN(CONFIG_VIRTIOFS_MAX_VQUEUE_SIZE, max_size);
	} else {
		return 0;
	}
}

int virtiofs_init(const struct device *dev, struct fuse_init_out *response)
{
	struct virtio_fs_config *fs_config = virtio_get_device_specific_config(dev);
	struct fuse_init_req req;
	int ret = 0;

	if (!fs_config) {
		LOG_ERR("no virtio_fs_config present");
		return -ENXIO;
	}
	if (fs_config->num_request_queues < 1) {
		/* this shouldn't ever happen */
		LOG_ERR("no request queue present");
		return -ENODEV;
	}

	ret = virtio_commit_feature_bits(dev);
	if (ret != 0) {
		return ret;
	}

	ret = virtio_init_virtqueues(dev, REQUEST_QUEUE, virtiofs_queue_enum_cb, NULL);
	if (ret != 0) {
		LOG_ERR("failed to initialize fs virtqueues");
		return ret;
	}

	virtio_finalize_init(dev);

	fuse_create_init_req(&req);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) + sizeof(req.init_in) },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) + sizeof(req.init_out) }
	};

	LOG_INF("sending FUSE_INIT, unique=%" PRIu64, req.in_header.unique);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF("received FUSE_INIT response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_INIT, used_len, buf[1].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

	if (req.init_out.major != FUSE_MAJOR_VERSION) {
		LOG_ERR(
			"FUSE_INIT major version mismatch (%d), version %d is supported",
			req.init_out.major,
			FUSE_MAJOR_VERSION
		);
		return -ENOTSUP;
	}

	if (req.init_out.minor < FUSE_MINOR_VERSION) {
		LOG_ERR(
			"FUSE_INIT minor version is too low (%d), version %d is supported",
			req.init_out.minor,
			FUSE_MINOR_VERSION
		);
		return -ENOTSUP;
	}

	*response = req.init_out;

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_init_req_out(&req.init_out);
#endif

	return 0;
}
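
/*
 * Typical bring-up sketch (illustrative, not part of this driver): a filesystem layer
 * calls virtiofs_init() once per device before issuing any other request:
 *
 *	struct fuse_init_out init_out;
 *	int err = virtiofs_init(dev, &init_out);
 *
 *	if (err == 0) {
 *		// negotiated protocol limits are now available in init_out, e.g. the
 *		// FUSE max_write field bounds how much a single FUSE_WRITE may carry
 *	}
 */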

/**
 * @brief Looks up an object in the virtiofs filesystem
 *
 * @param dev virtio device the filesystem is on
 * @param inode inode to start the lookup from
 * @param path path of the object to look up
 * @param response virtiofs response for the object
 * @param parent_inode will be set to the inode of the immediate parent of the looked-up
 * object. If the immediate parent doesn't exist it will be set to 0. If not 0, the caller
 * has to release it with FUSE_FORGET. Can be NULL.
 * @return 0 or error code on failure
 */
int virtiofs_lookup(
	const struct device *dev, uint64_t inode, const char *path, struct fuse_entry_out *response,
	uint64_t *parent_inode)
{
	uint32_t path_len = strlen(path) + 1;
	const char *curr = path;
	uint32_t curr_len = 0;
	uint64_t curr_inode = inode;
	struct fuse_lookup_req req;

	/*
	 * we have to split the path and look it up dir by dir, because FUSE_LOOKUP doesn't
	 * work with full paths like abc/xyz/file. We have to look up abc, then look up xyz
	 * with abc's inode as a base and then look up file with xyz's inode as a base
	 */
	while (curr < path + path_len) {
		curr_len = 0;
		for (const char *c = curr; c < path + path_len - 1 && *c != '/'; c++) {
			curr_len++;
		}

		fuse_create_lookup_req(&req, curr_inode, curr_len + 1);

		struct virtq_buf buf[] = {
			{ .addr = &req.in_header, .len = sizeof(struct fuse_in_header) },
			{ .addr = (void *)curr, .len = curr_len },
			/*
			 * despite the length being part of in_header, the name still has
			 * to be null terminated
			 */
			{ .addr = "", .len = 1 },
			{ .addr = &req.out_header,
			  .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out) }
		};

		LOG_INF(
			"sending FUSE_LOOKUP for \"%s\", nodeid=%" PRIu64 ", unique=%" PRIu64,
			curr, curr_inode, req.in_header.unique
		);
		uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 4, 3);

		LOG_INF("received FUSE_LOOKUP response, unique=%" PRIu64, req.out_header.unique);

		int valid_ret = virtiofs_validate_response(
			&req.out_header, FUSE_LOOKUP, used_len,
			sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out)
		);

		if (parent_inode) {
			*parent_inode = curr_inode;
		}

		*response = req.entry_out;
		if (valid_ret != 0) {
			if (parent_inode && (curr + curr_len + 1 != path + path_len)) {
				/* there is no immediate parent */
				if (*parent_inode != inode) {
					virtiofs_forget(dev, *parent_inode, 1);
				}
				*parent_inode = 0;
			}
			return valid_ret;
		}

#ifdef CONFIG_VIRTIOFS_DEBUG
		fuse_dump_entry_out(&req.entry_out);
#endif
		bool is_curr_parent = true;

		for (const char *c = curr; c < path + path_len; c++) {
			if (*c == '/') {
				is_curr_parent = false;
			}
		}

		/*
		 * unless it's the inode param passed to this function or a parent of the
		 * object we are looking for, curr_inode won't be used anymore, so we can
		 * forget it
		 */
		if (curr_inode != inode && (!parent_inode || !is_curr_parent)) {
			virtiofs_forget(dev, curr_inode, 1);
		}

		curr_inode = req.entry_out.nodeid;
		curr += curr_len + 1;
	}

	return 0;
}
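
/*
 * Example (illustrative sketch): looking up "logs/boot.txt" relative to the root inode
 * while honoring the parent_inode contract documented above:
 *
 *	struct fuse_entry_out entry;
 *	uint64_t parent;
 *
 *	if (virtiofs_lookup(dev, FUSE_ROOT_INODE, "logs/boot.txt", &entry, &parent) == 0) {
 *		// ... use entry.nodeid ...
 *		virtiofs_forget(dev, entry.nodeid, 1);
 *	}
 *	if (parent != 0) {
 *		virtiofs_forget(dev, parent, 1);
 *	}
 */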

int virtiofs_open(
	const struct device *dev, uint64_t inode, uint32_t flags, struct fuse_open_out *response,
	enum fuse_object_type type)
{
	struct fuse_open_req req;

	fuse_create_open_req(&req, inode, flags, type);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = req.in_header.len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) + sizeof(req.open_out) }
	};

	LOG_INF(
		"sending %s, nodeid=%" PRIu64 ", flags=0%" PRIo32 ", unique=%" PRIu64,
		type == FUSE_DIR ? "FUSE_OPENDIR" : "FUSE_OPEN", inode, flags, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF(
		"received %s response, unique=%" PRIu64,
		type == FUSE_DIR ? "FUSE_OPENDIR" : "FUSE_OPEN", req.out_header.unique
	);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, type == FUSE_DIR ? FUSE_OPENDIR : FUSE_OPEN, used_len, buf[1].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

	*response = req.open_out;

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_open_req_out(&req.open_out);
#endif

	return 0;
}

int virtiofs_read(
	const struct device *dev, uint64_t inode, uint64_t fh,
	uint64_t offset, uint32_t size, uint8_t *readbuf)
{
	struct fuse_read_req req;

	fuse_create_read_req(&req, inode, fh, offset, size, FUSE_FILE);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = req.in_header.len },
		{ .addr = &req.out_header, .len = sizeof(struct fuse_out_header) },
		{ .addr = readbuf, .len = size }
	};

	LOG_INF(
		"sending FUSE_READ, nodeid=%" PRIu64 ", fh=%" PRIu64 ", offset=%" PRIu64
		", size=%" PRIu32 ", unique=%" PRIu64,
		inode, fh, offset, size, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 3, 1);

	LOG_INF("received FUSE_READ response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(&req.out_header, FUSE_READ, used_len, -1);

	if (valid_ret != 0) {
		return valid_ret;
	}

	return req.out_header.len - sizeof(req.out_header);
}

int virtiofs_release(const struct device *dev, uint64_t inode, uint64_t fh,
		     enum fuse_object_type type)
{
	struct fuse_release_req req;

	fuse_create_release_req(&req, inode, fh, type);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = req.in_header.len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) }
	};

	LOG_INF(
		"sending %s, inode=%" PRIu64 ", fh=%" PRIu64 ", unique=%" PRIu64,
		type == FUSE_DIR ? "FUSE_RELEASEDIR" : "FUSE_RELEASE", inode, fh,
		req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF(
		"received %s response, unique=%" PRIu64,
		type == FUSE_DIR ? "FUSE_RELEASEDIR" : "FUSE_RELEASE", req.out_header.unique
	);

	return virtiofs_validate_response(
		&req.out_header, type == FUSE_DIR ? FUSE_RELEASEDIR : FUSE_RELEASE, used_len, -1
	);
}
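
/*
 * Example file-read lifecycle (illustrative sketch; assumes POSIX-style open flags and an
 * `entry` obtained with virtiofs_lookup() as above):
 *
 *	struct fuse_open_out open_out;
 *	uint8_t data[256];
 *
 *	if (virtiofs_open(dev, entry.nodeid, O_RDONLY, &open_out, FUSE_FILE) == 0) {
 *		int nread = virtiofs_read(dev, entry.nodeid, open_out.fh, 0,
 *					  sizeof(data), data);
 *		// nread is the byte count on success or a negative error code on failure
 *		virtiofs_release(dev, entry.nodeid, open_out.fh, FUSE_FILE);
 *	}
 */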

int virtiofs_destroy(const struct device *dev)
{
	struct fuse_destroy_req req;

	fuse_create_destroy_req(&req);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) }
	};

	LOG_INF("sending FUSE_DESTROY, unique=%" PRIu64, req.in_header.unique);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF("received FUSE_DESTROY response, unique=%" PRIu64, req.out_header.unique);

	return virtiofs_validate_response(&req.out_header, FUSE_DESTROY, used_len, -1);
}

int virtiofs_create(
	const struct device *dev, uint64_t inode, const char *fname, uint32_t flags,
	uint32_t mode, struct fuse_create_out *response)
{
	uint32_t fname_len = strlen(fname) + 1;
	struct fuse_create_req req;

	fuse_create_create_req(&req, inode, fname_len, flags, mode);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) + sizeof(req.create_in) },
		{ .addr = (void *)fname, .len = fname_len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) + sizeof(req.create_out) }
	};

	LOG_INF(
		"sending FUSE_CREATE for \"%s\", nodeid=%" PRIu64 ", flags=0%" PRIo32
		", unique=%" PRIu64,
		fname, inode, flags, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 3, 2);

	LOG_INF("received FUSE_CREATE response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_CREATE, used_len, buf[2].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

	*response = req.create_out;

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_create_req_out(&req.create_out);
#endif

	return 0;
}

int virtiofs_write(
	const struct device *dev, uint64_t inode, uint64_t fh, uint64_t offset,
	uint32_t size, const uint8_t *write_buf)
{
	struct fuse_write_req req;

	fuse_create_write_req(&req, inode, fh, offset, size);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) + sizeof(req.write_in) },
		{ .addr = (void *)write_buf, .len = size },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) + sizeof(req.write_out) }
	};

	LOG_INF(
		"sending FUSE_WRITE, nodeid=%" PRIu64 ", fh=%" PRIu64 ", offset=%" PRIu64
		", size=%" PRIu32 ", unique=%" PRIu64,
		inode, fh, offset, size, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 3, 2);

	LOG_INF("received FUSE_WRITE response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_WRITE, used_len, buf[2].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_write_out(&req.write_out);
#endif

	return req.write_out.size;
}
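
/*
 * Example file-creation sketch (illustrative; assumes POSIX-style flags/mode and that
 * struct fuse_create_out packs the FUSE entry and open replies, as the protocol's
 * FUSE_CREATE response does; the exact field names below are hypothetical):
 *
 *	struct fuse_create_out create_out;
 *	static const uint8_t msg[] = "hello";
 *
 *	if (virtiofs_create(dev, FUSE_ROOT_INODE, "hello.txt", O_RDWR | O_CREAT,
 *			    0644, &create_out) == 0) {
 *		int written = virtiofs_write(dev, create_out.entry_out.nodeid,
 *					     create_out.open_out.fh, 0,
 *					     sizeof(msg) - 1, msg);
 *		// written is the number of bytes accepted by the device on success
 *	}
 */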

int virtiofs_lseek(
	const struct device *dev, uint64_t inode, uint64_t fh, uint64_t offset,
	uint32_t whence, struct fuse_lseek_out *response)
{
	struct fuse_lseek_req req;

	fuse_create_lseek_req(&req, inode, fh, offset, whence);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = req.in_header.len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) + sizeof(req.lseek_out) }
	};

	LOG_INF(
		"sending FUSE_LSEEK, nodeid=%" PRIu64 ", fh=%" PRIu64 ", offset=%" PRIu64
		", whence=%" PRIu32 ", unique=%" PRIu64,
		inode, fh, offset, whence, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF("received FUSE_LSEEK response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_LSEEK, used_len, buf[1].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

	*response = req.lseek_out;

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_lseek_out(&req.lseek_out);
#endif

	return 0;
}
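
/*
 * Example (illustrative sketch; assumes `whence` uses the POSIX SEEK_* encoding, as FUSE
 * does): querying the size of an open file by seeking to its end:
 *
 *	struct fuse_lseek_out lseek_out;
 *
 *	if (virtiofs_lseek(dev, entry.nodeid, open_out.fh, 0, SEEK_END, &lseek_out) == 0) {
 *		uint64_t file_size = lseek_out.offset;
 *	}
 */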

int virtiofs_setattr(
	const struct device *dev, uint64_t inode, struct fuse_setattr_in *in,
	struct fuse_attr_out *response)
{
	struct fuse_setattr_req req;

	fuse_create_setattr_req(&req, inode);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) },
		{ .addr = in, .len = sizeof(*in) },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) },
		{ .addr = response, .len = sizeof(*response) },
	};

	LOG_INF("sending FUSE_SETATTR, unique=%" PRIu64, req.in_header.unique);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 4, 2);

	LOG_INF("received FUSE_SETATTR response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_SETATTR, used_len, sizeof(req.out_header) + sizeof(*response)
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_attr_out(response);
#endif

	return 0;
}
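
/*
 * Example (illustrative sketch; FATTR_SIZE and the fuse_setattr_in layout come from the
 * FUSE protocol and are assumed to be available here): truncating a file to zero length:
 *
 *	struct fuse_setattr_in in = { 0 };
 *	struct fuse_attr_out out;
 *
 *	in.valid = FATTR_SIZE;
 *	in.size = 0;
 *	int err = virtiofs_setattr(dev, entry.nodeid, &in, &out);
 */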

int virtiofs_fsync(const struct device *dev, uint64_t inode, uint64_t fh)
{
	struct fuse_fsync_req req;

	fuse_create_fsync_req(&req, inode, fh);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header,
		  .len = sizeof(req.in_header) + sizeof(req.fsync_in) },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) }
	};

	LOG_INF(
		"sending FUSE_FSYNC, nodeid=%" PRIu64 ", fh=%" PRIu64 ", unique=%" PRIu64,
		inode, fh, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF("received FUSE_FSYNC response, unique=%" PRIu64, req.out_header.unique);

	return virtiofs_validate_response(
		&req.out_header, FUSE_FSYNC, used_len, sizeof(req.out_header)
	);
}

int virtiofs_mkdir(const struct device *dev, uint64_t inode, const char *dirname, uint32_t mode)
{
	struct fuse_mkdir_req req;
	uint32_t dirname_len = strlen(dirname) + 1;

	fuse_create_mkdir_req(&req, inode, dirname_len, mode);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) + sizeof(req.mkdir_in) },
		{ .addr = (void *)dirname, .len = dirname_len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) + sizeof(req.entry_out) }
	};

	LOG_INF(
		"sending FUSE_MKDIR %s, inode=%" PRIu64 ", unique=%" PRIu64,
		dirname, inode, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 3, 2);

	LOG_INF("received FUSE_MKDIR response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_MKDIR, used_len, buf[2].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

	return 0;
}

int virtiofs_unlink(const struct device *dev, const char *fname, enum fuse_object_type type)
{
	struct fuse_unlink_req req;
	uint32_t fname_len = strlen(fname) + 1;

	fuse_create_unlink_req(&req, fname_len, type);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) },
		{ .addr = (void *)fname, .len = fname_len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) }
	};

	LOG_INF(
		"sending %s for %s, unique=%" PRIu64,
		type == FUSE_DIR ? "FUSE_RMDIR" : "FUSE_UNLINK", fname, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 3, 2);

	LOG_INF(
		"received %s response, unique=%" PRIu64,
		type == FUSE_DIR ? "FUSE_RMDIR" : "FUSE_UNLINK", req.out_header.unique
	);

	return virtiofs_validate_response(
		&req.out_header, type == FUSE_DIR ? FUSE_RMDIR : FUSE_UNLINK, used_len,
		sizeof(req.out_header)
	);
}

int virtiofs_rename(
	const struct device *dev, uint64_t old_dir_inode, const char *old_name,
	uint64_t new_dir_inode, const char *new_name)
{
	struct fuse_rename_req req;
	uint32_t old_len = strlen(old_name) + 1;
	uint32_t new_len = strlen(new_name) + 1;

	fuse_create_rename_req(&req, old_dir_inode, old_len, new_dir_inode, new_len);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) + sizeof(req.rename_in) },
		{ .addr = (void *)old_name, .len = old_len },
		{ .addr = (void *)new_name, .len = new_len },
		{ .addr = &req.out_header, .len = sizeof(req.out_header) }
	};

	LOG_INF(
		"sending FUSE_RENAME %s to %s, unique=%" PRIu64,
		old_name, new_name, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 4, 3);

	LOG_INF("received FUSE_RENAME response, unique=%" PRIu64, req.out_header.unique);

	return virtiofs_validate_response(
		&req.out_header, FUSE_RENAME, used_len, sizeof(req.out_header)
	);
}
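
/*
 * Example directory-manipulation sketch (illustrative; error handling omitted). Note that
 * virtiofs_unlink() takes only a name, with no explicit parent inode:
 *
 *	virtiofs_mkdir(dev, FUSE_ROOT_INODE, "logs", 0755);
 *	virtiofs_rename(dev, FUSE_ROOT_INODE, "logs", FUSE_ROOT_INODE, "logs.old");
 *	virtiofs_unlink(dev, "logs.old", FUSE_DIR);	// FUSE_DIR selects FUSE_RMDIR
 */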

int virtiofs_statfs(const struct device *dev, struct fuse_kstatfs *response)
{
	struct fuse_kstatfs_req req;

	fuse_fill_header(&req.in_header, sizeof(req.in_header), FUSE_STATFS, FUSE_ROOT_INODE);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = sizeof(req.in_header) },
		{ .addr = &req.out_header,
		  .len = sizeof(req.out_header) + sizeof(req.kstatfs_out) }
	};

	LOG_INF("sending FUSE_STATFS, unique=%" PRIu64, req.in_header.unique);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 2, 1);

	LOG_INF("received FUSE_STATFS response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(
		&req.out_header, FUSE_STATFS, used_len, buf[1].len
	);

	if (valid_ret != 0) {
		return valid_ret;
	}

#ifdef CONFIG_VIRTIOFS_DEBUG
	fuse_dump_kstafs(&req.kstatfs_out);
#endif

	*response = req.kstatfs_out;

	return 0;
}
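
/*
 * Example (illustrative sketch): computing free space from the FUSE_STATFS reply, whose
 * fields mirror struct statvfs:
 *
 *	struct fuse_kstatfs stats;
 *
 *	if (virtiofs_statfs(dev, &stats) == 0) {
 *		uint64_t free_bytes = (uint64_t)stats.bfree * stats.frsize;
 *	}
 */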

int virtiofs_readdir(
	const struct device *dev, uint64_t inode, uint64_t fh, uint64_t offset,
	uint8_t *dirent_buf, uint32_t dirent_size, uint8_t *name_buf, uint32_t name_size)
{
	struct fuse_read_req req;

	fuse_create_read_req(&req, inode, fh, offset, dirent_size + name_size, FUSE_DIR);

	struct virtq_buf buf[] = {
		{ .addr = &req.in_header, .len = req.in_header.len },
		{ .addr = &req.out_header, .len = sizeof(struct fuse_out_header) },
		{ .addr = dirent_buf, .len = dirent_size },
		{ .addr = name_buf, .len = name_size }
	};

	LOG_INF(
		"sending FUSE_READDIR, nodeid=%" PRIu64 ", fh=%" PRIu64 ", offset=%" PRIu64
		", size=%" PRIu32 ", unique=%" PRIu64,
		inode, fh, offset, dirent_size + name_size, req.in_header.unique
	);
	uint32_t used_len = virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 4, 1);

	LOG_INF("received FUSE_READDIR response, unique=%" PRIu64, req.out_header.unique);

	int valid_ret = virtiofs_validate_response(&req.out_header, FUSE_READDIR, used_len, -1);

	if (valid_ret != 0) {
		return valid_ret;
	}

	return req.out_header.len - sizeof(req.out_header);
}
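
/*
 * Example (illustrative sketch): from the device's point of view dirent_buf and name_buf
 * are one contiguous writable area, so the FUSE_READDIR reply stream of fuse_dirent
 * records simply spans both buffers:
 *
 *	uint8_t dirents[512], names[512];
 *	int len = virtiofs_readdir(dev, entry.nodeid, open_out.fh, 0,
 *				   dirents, sizeof(dirents), names, sizeof(names));
 *	// on success `len` is the number of reply bytes following the out header
 */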

void virtiofs_forget(const struct device *dev, uint64_t inode, uint64_t nlookup)
{
	if (inode == FUSE_ROOT_INODE) {
		return;
	}

	struct fuse_forget_req req;

	fuse_fill_header(&req.in_header, sizeof(req.in_header), FUSE_FORGET, inode);
	req.forget_in.nlookup = nlookup; /* refcount will be decreased by this value */

	struct virtq_buf buf[] = {
		{ .addr = &req, .len = sizeof(req.in_header) + sizeof(req.forget_in) }
	};

	LOG_INF(
		"sending FUSE_FORGET nodeid=%" PRIu64 ", nlookup=%" PRIu64 ", unique=%" PRIu64,
		inode, nlookup, req.in_header.unique
	);
	virtiofs_send_receive(dev, REQUEST_QUEUE, buf, 1, 1);
	LOG_INF("received FUSE_FORGET response, unique=%" PRIu64, req.in_header.unique);

	/*
	 * In contrast to the other FUSE operations, this one doesn't return a fuse_out_header.
	 * The virtio spec v1.3 5.11.6.1 says that the out header is common to all types of
	 * FUSE requests, but a comment in include/uapi/linux/fuse.h states otherwise, namely
	 * that FUSE_FORGET has no reply, so there is no error code to return
	 */
}