/*
 * Copyright (c) 2023, Google Inc. All rights reserved.
 * Author: codycswong@google.com (Cody Wong)
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <dev/virtio.h>
#include <dev/virtio/9p.h>
#include <kernel/event.h>
#include <kernel/vm.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <lk/trace.h>

#include "protocol.h"

#define LOCAL_TRACE 0

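/*
 * Allocate the backing buffer for a protocol data unit (PDU). The buffer is
 * physically contiguous and mapped uncached so the device can read and write
 * it directly.
 */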
static status_t pdu_init(struct p9_fcall *pdu, size_t size)
{
    status_t ret;

    ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_9p_pdu", size,
                               (void *)&pdu->sdata, 0, 0,
                               ARCH_MMU_FLAG_UNCACHED_DEVICE);
    if (ret != NO_ERROR || !pdu->sdata)
        return ERR_NO_MEMORY;
    pdu->capacity = size;
    return NO_ERROR;
}

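/* Release the PDU's backing buffer, if any, and clear its bookkeeping. */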
static void pdu_fini(struct p9_fcall *pdu)
{
    if (pdu->sdata)
        vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)pdu->sdata);
    pdu->sdata = NULL;
    pdu->capacity = 0;
}

static void pdu_reset(struct p9_fcall *pdu)
{
    pdu->offset = 0;
    pdu->size = 0;
}

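/*
 * Prepare a request for a new transaction: allocate the T- and R-message
 * PDUs and write the common 9P header of the T-message, i.e. size[4]
 * type[1] tag[2]. The size field is written as a placeholder here and
 * patched with the final length in p9_req_finalize().
 */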
static status_t p9_req_prepare(struct p9_req *req,
                               const virtio_9p_msg_t *tmsg)
{
    struct virtio_9p_dev *p9dev = containerof(req, struct virtio_9p_dev, req);
    status_t ret = NO_ERROR;

    if ((ret = pdu_init(&req->tc, p9dev->msize)) != NO_ERROR) {
        goto err;
    }

    if ((ret = pdu_init(&req->rc, p9dev->msize)) != NO_ERROR) {
        goto err;
    }

    pdu_reset(&req->tc);
    pdu_reset(&req->rc);

    event_init(&req->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);
    req->status = P9_REQ_S_INITIALIZED;

    // fill 9p header
    if (pdu_writed(&req->tc, 0) != NO_ERROR) {
        ret = ERR_IO;
        goto err;
    }
    if (pdu_writeb(&req->tc, tmsg->msg_type) != NO_ERROR) {
        ret = ERR_IO;
        goto err;
    }
    if (pdu_writew(&req->tc, tmsg->tag) != NO_ERROR) {
        ret = ERR_IO;
        goto err;
    }

    return NO_ERROR;
err:
    pdu_fini(&req->tc);
    pdu_fini(&req->rc);
    return ret;
}

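/* Tear down a completed (or failed) request and free both PDUs. */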
static void p9_req_release(struct p9_req *req)
{
    req->status = P9_REQ_S_UNKNOWN;
    event_destroy(&req->io_event);

    pdu_fini(&req->tc);
    pdu_fini(&req->rc);
}

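/*
 * Finalize the T-message: rewind to the start of the PDU, overwrite the
 * size placeholder with the total encoded length, then restore the recorded
 * size so the transmit path knows how many bytes to post.
 */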
static status_t p9_req_finalize(struct p9_req *req)
{
    uint32_t size = req->tc.size;
    status_t ret;

    pdu_reset(&req->tc);
    ret = pdu_writed(&req->tc, size);
    req->tc.size = size;
#if LOCAL_TRACE >= 2
    LTRACEF("req->tc.sdata (%p) size (%u)\n", req->tc.sdata, size);
    hexdump8(req->tc.sdata, size);
#endif

    return ret;
}

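/*
 * Parse the common header of the received R-message: consume the size field
 * and hand the type and tag back to the caller. The type-specific body is
 * decoded later by the matching p9_proto_r*() helper.
 */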
static void p9_req_receive(struct p9_req *req,
                           virtio_9p_msg_t *rmsg)
{
    pdu_readd(&req->rc);
    rmsg->msg_type = pdu_readb(&req->rc);
    rmsg->tag = pdu_readw(&req->rc);
#if LOCAL_TRACE >= 2
    LTRACEF("req->rc.sdata (%p) req->rc.size (%u)\n", req->rc.sdata,
            req->rc.size);
    hexdump8(req->rc.sdata, req->rc.size);
#endif
}

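/*
 * Post a prepared request to the device as a two-descriptor chain: the first
 * descriptor carries the T-message (device-readable), the second points at
 * the R-message buffer the device writes its reply into.
 */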
static void virtio_9p_req_send(struct virtio_9p_dev *p9dev,
                               struct p9_req *req)
{
    struct virtio_device *dev = p9dev->dev;
    struct vring_desc *desc;
    uint16_t idx;

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&p9dev->lock, state);

    desc = virtio_alloc_desc_chain(dev, VIRTIO_9P_RING_IDX, 2, &idx);

    desc->len = req->tc.size;
    desc->addr = vaddr_to_paddr(req->tc.sdata);
    desc->flags |= VRING_DESC_F_NEXT;
#if LOCAL_TRACE > 2
    LTRACEF("desc (%p)\n", desc);
    virtio_dump_desc(desc);
#endif

    desc = virtio_desc_index_to_desc(dev, VIRTIO_9P_RING_IDX, desc->next);
    desc->len = req->rc.capacity;
    desc->addr = vaddr_to_paddr(req->rc.sdata);
    desc->flags |= VRING_DESC_F_WRITE;
#if LOCAL_TRACE > 2
    LTRACEF("desc (%p)\n", desc);
    virtio_dump_desc(desc);
#endif

    req->status = P9_REQ_S_SENT;

    /* submit the transfer */
    virtio_submit_chain(dev, VIRTIO_9P_RING_IDX, idx);

    /* kick it off */
    virtio_kick(dev, VIRTIO_9P_RING_IDX);

    spin_unlock_irqrestore(&p9dev->lock, state);
}

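/*
 * virtio_9p_rpc() runs one synchronous 9P transaction: encode the T-message,
 * post it to the device, block until the matching R-message arrives (or the
 * RPC times out), then decode the reply into rmsg. Only a single request can
 * be outstanding at a time.
 *
 * Illustrative call sequence (a sketch only; the members of the msg union
 * shown below are assumptions, not the actual virtio_9p_msg_t layout):
 *
 *     virtio_9p_msg_t tmsg = {
 *         .msg_type = P9_TGETATTR,   // any supported T-message type
 *         .tag = 1,                  // caller-chosen 9P tag
 *         // type-specific fields of the msg union go here (field names are
 *         // hypothetical and depend on the virtio_9p_msg_t definition)
 *     };
 *     virtio_9p_msg_t rmsg = {0};
 *     status_t rc = virtio_9p_rpc(dev, &tmsg, &rmsg);
 *     if (rc == NO_ERROR) {
 *         // ... consume rmsg ...
 *         virtio_9p_msg_destroy(&rmsg);  // free buffers the decoder allocated
 *     }
 */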
status_t virtio_9p_rpc(struct virtio_device *dev, const virtio_9p_msg_t *tmsg,
                       virtio_9p_msg_t *rmsg)
{
    LTRACEF("dev (%p) tmsg (%p) rmsg (%p)\n", dev, tmsg, rmsg);

    struct virtio_9p_dev *p9dev = dev->priv;
    struct p9_req *req = &p9dev->req;
    status_t ret;

    if (!tmsg || !rmsg) {
        return ERR_INVALID_ARGS;
    }

    // Only one outstanding request is supported for now, so a device-level
    // lock serializes RPCs: only one can be in flight at a time. If multiple
    // outstanding requests are ever supported, this lock should move into the
    // request allocation phase.
    mutex_acquire(&p9dev->req_lock);

    // prepare the message header
    ret = p9_req_prepare(req, tmsg);
    if (ret != NO_ERROR) {
        goto req_unlock;
    }

    // setup the T-message by its msg-type
    switch (tmsg->msg_type) {
    case P9_TLOPEN:
        ret = p9_proto_tlopen(req, tmsg);
        break;
    case P9_TGETATTR:
        ret = p9_proto_tgetattr(req, tmsg);
        break;
    case P9_TVERSION:
        ret = p9_proto_tversion(req, tmsg);
        break;
    case P9_TATTACH:
        ret = p9_proto_tattach(req, tmsg);
        break;
    case P9_TWALK:
        ret = p9_proto_twalk(req, tmsg);
        break;
    case P9_TOPEN:
        ret = p9_proto_topen(req, tmsg);
        break;
    case P9_TREAD:
        ret = p9_proto_tread(req, tmsg);
        break;
    case P9_TWRITE:
        ret = p9_proto_twrite(req, tmsg);
        break;
    case P9_TCLUNK:
        ret = p9_proto_tclunk(req, tmsg);
        break;
    case P9_TREMOVE:
        ret = p9_proto_tremove(req, tmsg);
        break;
    case P9_TLCREATE:
        ret = p9_proto_tlcreate(req, tmsg);
        break;
    case P9_TREADDIR:
        ret = p9_proto_treaddir(req, tmsg);
        break;
    case P9_TMKDIR:
        ret = p9_proto_tmkdir(req, tmsg);
        break;
    default:
        LTRACEF("9p T-message type not supported: %u\n", tmsg->msg_type);
        ret = ERR_NOT_SUPPORTED;
        goto err;
    }

    if (ret != NO_ERROR) {
        LTRACEF("9p T-message (code: %u) failed: %d\n", tmsg->msg_type, ret);
        goto err;
    }

    if ((ret = p9_req_finalize(req)) != NO_ERROR) {
        goto err;
    }

    virtio_9p_req_send(p9dev, req);

    // wait for server's response
    if (event_wait_timeout(&req->io_event, VIRTIO_9P_RPC_TIMEOUT) != NO_ERROR) {
        ret = ERR_TIMED_OUT;
        goto err;
    }

    // read the message header from the returned request
    p9_req_receive(req, rmsg);

    // read the R-message according to its msg-type
    switch (rmsg->msg_type) {
    case P9_RLOPEN:
        ret = p9_proto_rlopen(req, rmsg);
        break;
    case P9_RGETATTR:
        ret = p9_proto_rgetattr(req, rmsg);
        break;
    case P9_RVERSION:
        ret = p9_proto_rversion(req, rmsg);
        break;
    case P9_RATTACH:
        ret = p9_proto_rattach(req, rmsg);
        break;
    case P9_RWALK:
        ret = p9_proto_rwalk(req, rmsg);
        break;
    case P9_ROPEN:
        ret = p9_proto_ropen(req, rmsg);
        break;
    case P9_RREAD:
        ret = p9_proto_rread(req, rmsg);
        break;
    case P9_RWRITE:
        ret = p9_proto_rwrite(req, rmsg);
        break;
    case P9_RCLUNK:
        ret = p9_proto_rclunk(req, rmsg);
        break;
    case P9_RREMOVE:
        ret = p9_proto_rremove(req, rmsg);
        break;
    case P9_RLERROR:
        ret = p9_proto_rlerror(req, rmsg);
        break;
    case P9_RLCREATE:
        ret = p9_proto_rlcreate(req, rmsg);
        break;
    case P9_RREADDIR:
        ret = p9_proto_rreaddir(req, rmsg);
        break;
    case P9_RMKDIR:
        ret = p9_proto_rmkdir(req, rmsg);
        break;
    default:
        LTRACEF("9p R-message type not supported: %u\n", rmsg->msg_type);
        ret = ERR_NOT_SUPPORTED;
        goto err;
    }

err:
    p9_req_release(req);

req_unlock:
    mutex_release(&p9dev->req_lock);

    return ret;
}

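/*
 * Free any buffers that were allocated while decoding an R-message (the
 * version string and the read/readdir payloads); other message types
 * allocate nothing extra.
 */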
void virtio_9p_msg_destroy(virtio_9p_msg_t *msg)
{
    switch (msg->msg_type) {
    case P9_RVERSION:
        free(msg->msg.rversion.version);
        break;
    case P9_RREAD:
        free(msg->msg.rread.data);
        break;
    case P9_RREADDIR:
        free(msg->msg.rreaddir.data);
        break;
    default:
        // didn't allocate extra space in the message
        break;
    }
}