/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file  udc_virtual.c
 * @brief Virtual USB device controller (UDC) driver
 *
 * The virtual device controller does not emulate any hardware
 * and can only communicate with the virtual host controller
 * through a virtual bus.
 */
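
/*
 * Illustrative devicetree placement (a sketch only: the node labels and
 * the parent compatible are assumptions, while the "zephyr,udc-virtual"
 * compatible and the num-bidir-endpoints and maximum-speed properties are
 * the ones this driver actually reads):
 *
 *	uhc0: uhc_virtual {
 *		compatible = "zephyr,uhc-virtual";
 *
 *		udc0: udc_virtual {
 *			compatible = "zephyr,udc-virtual";
 *			num-bidir-endpoints = <8>;
 *			maximum-speed = "full-speed";
 *		};
 *	};
 */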

#include "udc_common.h"
#include "../uvb/uvb.h"

#include <string.h>
#include <stdio.h>

#include <zephyr/kernel.h>
#include <zephyr/drivers/usb/udc.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(udc_vrt, CONFIG_UDC_DRIVER_LOG_LEVEL);

struct udc_vrt_config {
	size_t num_of_eps;
	struct udc_ep_config *ep_cfg_in;
	struct udc_ep_config *ep_cfg_out;
	void (*make_thread)(const struct device *dev);
	struct uvb_node *dev_node;
	int speed_idx;
	const char *uhc_name;
};

struct udc_vrt_data {
	struct k_fifo fifo;
	struct k_thread thread_data;
	uint8_t addr;
};

struct udc_vrt_event {
	sys_snode_t node;
	enum uvb_event_type type;
	struct uvb_packet *pkt;
};

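/*
 * UVB events arrive in the bus caller's context and are copied into
 * slab-allocated udc_vrt_event entries, queued to the per-instance FIFO,
 * and consumed by the driver thread, which frees them afterwards.
 */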
K_MEM_SLAB_DEFINE(udc_vrt_slab, sizeof(struct udc_vrt_event),
		  16, sizeof(void *));

/* Reuse request packet for reply */
static int vrt_request_reply(const struct device *dev,
			     struct uvb_packet *const pkt,
			     const enum uvb_reply reply)
{
	const struct udc_vrt_config *config = dev->config;

	pkt->reply = reply;

	return uvb_reply_pkt(config->dev_node, pkt);
}

static void ctrl_ep_clear_halt(const struct device *dev)
{
	struct udc_ep_config *cfg;

	cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	cfg->stat.halted = false;

	cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN);
	cfg->stat.halted = false;
}

static int vrt_ctrl_feed_dout(const struct device *dev,
			      const size_t length)
{
	struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	struct net_buf *buf;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
	if (buf == NULL) {
		return -ENOMEM;
	}

	udc_buf_put(ep_cfg, buf);

	return 0;
}

static void drop_control_transfers(const struct device *dev)
{
	struct net_buf *buf;

	buf = udc_buf_get_all(udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT));
	if (buf != NULL) {
		net_buf_unref(buf);
	}

	buf = udc_buf_get_all(udc_get_ep_cfg(dev, USB_CONTROL_EP_IN));
	if (buf != NULL) {
		net_buf_unref(buf);
	}
}

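/*
 * Handle a SETUP packet: discard any stale control transfers, copy the
 * eight setup bytes into a fresh buffer, advance the control stage state
 * machine, and then either feed an OUT buffer, submit for the data IN
 * stage, or submit for the status stage, before acknowledging the packet.
 */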
static int vrt_handle_setup(const struct device *dev,
			    struct uvb_packet *const pkt)
{
	struct net_buf *buf;
	int err, ret;

	drop_control_transfers(dev);

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, 8);
	if (buf == NULL) {
		return -ENOMEM;
	}

	net_buf_add_mem(buf, pkt->data, pkt->length);
	udc_ep_buf_set_setup(buf);
	ctrl_ep_clear_halt(dev);

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s: %p | feed for -out-", buf);
		err = vrt_ctrl_feed_dout(dev, udc_data_stage_length(buf));
		if (err == -ENOMEM) {
			/*
			 * Pass it on to the higher level which will
			 * halt the control OUT endpoint.
			 */
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		LOG_DBG("s: %p | submit for -in-", buf);
		/* Allocate buffer for data IN and submit to upper layer */
		err = udc_ctrl_submit_s_in_status(dev);
	} else {
		LOG_DBG("s: %p | submit for -status-", buf);
		/* No data stage, submit for the status stage only */
		err = udc_ctrl_submit_s_status(dev);
	}

	ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK);

	return ret ? ret : err;
}

static int vrt_handle_ctrl_out(const struct device *dev,
			       struct net_buf *const buf)
{
	int err = 0;

	if (udc_ctrl_stage_is_status_out(dev)) {
		/* Status stage finished, notify upper layer */
		err = udc_ctrl_submit_status(dev, buf);
	}

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_status_in(dev)) {
		return udc_ctrl_submit_s_out_status(dev, buf);
	}

	return err;
}

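/*
 * Handle an OUT data packet: copy as much as fits into the queued buffer
 * and complete the transfer when the buffer is full or the packet is
 * shorter than MPS.
 */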
static int vrt_handle_out(const struct device *dev,
			  struct uvb_packet *const pkt)
{
	struct udc_ep_config *ep_cfg;
	const uint8_t ep = pkt->ep;
	struct net_buf *buf;
	size_t min_len;
	int err = 0;
	int ret;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg->stat.halted) {
		LOG_DBG("reply STALL ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_STALL);
	}

	buf = udc_buf_peek(ep_cfg);
	if (buf == NULL) {
		LOG_DBG("reply NACK ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_NACK);
	}

	min_len = MIN(pkt->length, net_buf_tailroom(buf));
	net_buf_add_mem(buf, pkt->data, min_len);

	LOG_DBG("Handle data OUT, %zu | %zu", pkt->length, net_buf_tailroom(buf));

	if (net_buf_tailroom(buf) == 0 || pkt->length < udc_mps_ep_size(ep_cfg)) {
		buf = udc_buf_get(ep_cfg);

		if (ep == USB_CONTROL_EP_OUT) {
			err = vrt_handle_ctrl_out(dev, buf);
		} else {
			err = udc_submit_ep_event(dev, buf, 0);
		}
	}

	ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK);

	return ret ? ret : err;
}

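/*
 * Completion of a control IN transfer: report a finished status (or
 * no-data) stage upstream, then feed a zero-length OUT buffer when a
 * status OUT stage follows.
 */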
static int isr_handle_ctrl_in(const struct device *dev,
			      struct net_buf *const buf)
{
	int err = 0;

	if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) {
		/* Status stage finished, notify upper layer */
		err = udc_ctrl_submit_status(dev, buf);
	}

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_status_out(dev)) {
		/*
		 * IN transfer finished, release the buffer and
		 * feed a control OUT buffer for the status stage.
		 */
		net_buf_unref(buf);
		return vrt_ctrl_feed_dout(dev, 0);
	}

	return err;
}

static int vrt_handle_in(const struct device *dev,
			 struct uvb_packet *const pkt)
{
	struct udc_ep_config *ep_cfg;
	const uint8_t ep = pkt->ep;
	struct net_buf *buf;
	size_t min_len;
	int err = 0;
	int ret;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg->stat.halted) {
		LOG_DBG("reply STALL ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_STALL);
	}

	buf = udc_buf_peek(ep_cfg);
	if (buf == NULL) {
		LOG_DBG("reply NACK ep 0x%02x", ep);
		return vrt_request_reply(dev, pkt, UVB_REPLY_NACK);
	}

	LOG_DBG("Handle data IN, %zu | %u | %u",
		pkt->length, buf->len, udc_mps_ep_size(ep_cfg));
	min_len = MIN(pkt->length, buf->len);
	memcpy(pkt->data, buf->data, min_len);
	net_buf_pull(buf, min_len);
	pkt->length = min_len;

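	/*
	 * A drained buffer or a short (less than MPS) packet ends the
	 * transfer; a pending ZLP flag defers completion by one more
	 * zero-length IN transaction.
	 */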
	if (buf->len == 0 || pkt->length < udc_mps_ep_size(ep_cfg)) {
		if (udc_ep_buf_has_zlp(buf)) {
			udc_ep_buf_clear_zlp(buf);
			goto continue_in;
		}

		LOG_DBG("Finish data IN %zu | %u", pkt->length, buf->len);
		buf = udc_buf_get(ep_cfg);

		if (ep == USB_CONTROL_EP_IN) {
			err = isr_handle_ctrl_in(dev, buf);
		} else {
			err = udc_submit_ep_event(dev, buf, 0);
		}
	}

continue_in:
	ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK);

	return ret ? ret : err;
}

static int vrt_handle_request(const struct device *dev,
			      struct uvb_packet *const pkt)
{
	LOG_DBG("REQUEST event for %p pkt %p", dev, pkt);

	if (USB_EP_GET_IDX(pkt->ep) == 0 && pkt->request == UVB_REQUEST_SETUP) {
		return vrt_handle_setup(dev, pkt);
	}

	if (USB_EP_DIR_IS_OUT(pkt->ep) && pkt->request == UVB_REQUEST_DATA) {
		return vrt_handle_out(dev, pkt);
	}

	if (USB_EP_DIR_IS_IN(pkt->ep) && pkt->request == UVB_REQUEST_DATA) {
		return vrt_handle_in(dev, pkt);
	}

	return -ENOTSUP;
}

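/*
 * Driver thread: block on the event FIFO, translate bus events into UDC
 * events, dispatch data requests to the packet handlers, and release the
 * slab entry when done.
 */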
static ALWAYS_INLINE void udc_vrt_thread_handler(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct udc_vrt_data *priv = udc_get_private(dev);

	while (true) {
		struct udc_vrt_event *vrt_ev;
		int err = 0;

		vrt_ev = k_fifo_get(&priv->fifo, K_FOREVER);

		switch (vrt_ev->type) {
		case UVB_EVT_VBUS_REMOVED:
			err = udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0);
			break;
		case UVB_EVT_VBUS_READY:
			err = udc_submit_event(dev, UDC_EVT_VBUS_READY, 0);
			break;
		case UVB_EVT_SUSPEND:
			err = udc_submit_event(dev, UDC_EVT_SUSPEND, 0);
			break;
		case UVB_EVT_RESUME:
			err = udc_submit_event(dev, UDC_EVT_RESUME, 0);
			break;
		case UVB_EVT_RESET:
			err = udc_submit_event(dev, UDC_EVT_RESET, 0);
			break;
		case UVB_EVT_REQUEST:
			err = vrt_handle_request(dev, vrt_ev->pkt);
			break;
		default:
			break;
		}

		if (err) {
			udc_submit_event(dev, UDC_EVT_ERROR, err);
		}

		k_mem_slab_free(&udc_vrt_slab, (void *)vrt_ev);
	}
}

static void vrt_submit_uvb_event(const struct device *dev,
				 const enum uvb_event_type type,
				 struct uvb_packet *const pkt)
{
	struct udc_vrt_data *priv = udc_get_private(dev);
	struct udc_vrt_event *vrt_ev;
	int ret;

	ret = k_mem_slab_alloc(&udc_vrt_slab, (void **)&vrt_ev, K_NO_WAIT);
	__ASSERT(ret == 0, "Failed to allocate slab");

	vrt_ev->type = type;
	vrt_ev->pkt = pkt;
	k_fifo_put(&priv->fifo, vrt_ev);
}

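/*
 * UVB notification callback. VBUS events pass once the controller is
 * initialized, bus events once it is enabled, and data requests only
 * when they are addressed to this device.
 */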
static void udc_vrt_uvb_cb(const void *const vrt_priv,
			   const enum uvb_event_type type,
			   const void *data)
{
	const struct device *dev = vrt_priv;
	struct udc_vrt_data *priv = udc_get_private(dev);
	struct uvb_packet *const pkt = (void *)data;

	switch (type) {
	case UVB_EVT_VBUS_REMOVED:
		__fallthrough;
	case UVB_EVT_VBUS_READY:
		if (udc_is_initialized(dev)) {
			vrt_submit_uvb_event(dev, type, NULL);
		}
		break;
	case UVB_EVT_SUSPEND:
		__fallthrough;
	case UVB_EVT_RESUME:
		__fallthrough;
	case UVB_EVT_RESET:
		if (udc_is_enabled(dev)) {
			vrt_submit_uvb_event(dev, type, NULL);
		}
		break;
	case UVB_EVT_REQUEST:
		if (udc_is_enabled(dev) && priv->addr == pkt->addr) {
			vrt_submit_uvb_event(dev, type, pkt);
		}
		break;
	default:
		LOG_ERR("Unknown event for %p", dev);
		break;
	}
}

static int udc_vrt_ep_enqueue(const struct device *dev,
			      struct udc_ep_config *cfg,
			      struct net_buf *buf)
{
	LOG_DBG("%p enqueue %p", dev, buf);
	udc_buf_put(cfg, buf);

	if (cfg->stat.halted) {
		LOG_DBG("ep 0x%02x halted", cfg->addr);
	}

	return 0;
}

static int udc_vrt_ep_dequeue(const struct device *dev,
			      struct udc_ep_config *cfg)
{
	unsigned int lock_key;
	struct net_buf *buf;

	lock_key = irq_lock();
	/* Draft dequeue implementation */
	buf = udc_buf_get_all(cfg);
	if (buf) {
		udc_submit_ep_event(dev, buf, -ECONNABORTED);
	}
	irq_unlock(lock_key);

	return 0;
}

static int udc_vrt_ep_enable(const struct device *dev,
			     struct udc_ep_config *cfg)
{
	return 0;
}

static int udc_vrt_ep_disable(const struct device *dev,
			      struct udc_ep_config *cfg)
{
	return 0;
}

static int udc_vrt_ep_set_halt(const struct device *dev,
			       struct udc_ep_config *cfg)
{
	LOG_DBG("Set halt ep 0x%02x", cfg->addr);

	cfg->stat.halted = true;

	return 0;
}

static int udc_vrt_ep_clear_halt(const struct device *dev,
				 struct udc_ep_config *cfg)
{
	cfg->stat.halted = false;

	return 0;
}

static int udc_vrt_set_address(const struct device *dev, const uint8_t addr)
{
	struct udc_vrt_data *priv = udc_get_private(dev);

	priv->addr = addr;
	LOG_DBG("Set new address %u for %p", priv->addr, dev);

	return 0;
}

static int udc_vrt_host_wakeup(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT,
			   INT_TO_POINTER(UVB_DEVICE_ACT_RWUP));
}

static enum udc_bus_speed udc_vrt_device_speed(const struct device *dev)
{
	struct udc_data *data = dev->data;

	/* FIXME: get actual device speed */
	return data->caps.hs ? UDC_BUS_SPEED_HS : UDC_BUS_SPEED_FS;
}

static int udc_vrt_enable(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;
	enum uvb_device_act act;

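	/* Map the devicetree maximum-speed enum index onto a UVB device activity. */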
	switch (config->speed_idx) {
	case 1:
		act = UVB_DEVICE_ACT_FS;
		break;
	case 2:
		act = UVB_DEVICE_ACT_HS;
		break;
	case 3:
		act = UVB_DEVICE_ACT_SS;
		break;
	case 0:
	default:
		act = UVB_DEVICE_ACT_LS;
		break;
	}

	return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT,
			   INT_TO_POINTER(act));
}


static int udc_vrt_disable(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT,
			   INT_TO_POINTER(UVB_DEVICE_ACT_REMOVED));
}

static int udc_vrt_init(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT,
				   USB_EP_TYPE_CONTROL, 64, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN,
				   USB_EP_TYPE_CONTROL, 64, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}

	return uvb_subscribe(config->uhc_name, config->dev_node);
}

static int udc_vrt_shutdown(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}

	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}

	return uvb_unsubscribe(config->uhc_name, config->dev_node);
}

static int udc_vrt_driver_preinit(const struct device *dev)
{
	const struct udc_vrt_config *config = dev->config;
	struct udc_data *data = dev->data;
	struct udc_vrt_data *priv = data->priv;
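	/* 1023 is the full-speed isochronous maximum; high-speed raises it to 1024. */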
	uint16_t mps = 1023;
	int err;

	k_mutex_init(&data->mutex);
	k_fifo_init(&priv->fifo);

	data->caps.rwup = true;
	data->caps.mps0 = UDC_MPS0_64;
	if (config->speed_idx == 2) {
		data->caps.hs = true;
		mps = 1024;
	}

	for (int i = 0; i < config->num_of_eps; i++) {
		config->ep_cfg_out[i].caps.out = 1;
		if (i == 0) {
			config->ep_cfg_out[i].caps.control = 1;
			config->ep_cfg_out[i].caps.mps = 64;
		} else {
			config->ep_cfg_out[i].caps.bulk = 1;
			config->ep_cfg_out[i].caps.interrupt = 1;
			config->ep_cfg_out[i].caps.iso = 1;
			config->ep_cfg_out[i].caps.mps = mps;
		}

		config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i;
		err = udc_register_ep(dev, &config->ep_cfg_out[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	for (int i = 0; i < config->num_of_eps; i++) {
		config->ep_cfg_in[i].caps.in = 1;
		if (i == 0) {
			config->ep_cfg_in[i].caps.control = 1;
			config->ep_cfg_in[i].caps.mps = 64;
		} else {
			config->ep_cfg_in[i].caps.bulk = 1;
			config->ep_cfg_in[i].caps.interrupt = 1;
			config->ep_cfg_in[i].caps.iso = 1;
			config->ep_cfg_in[i].caps.mps = mps;
		}

		config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i;
		err = udc_register_ep(dev, &config->ep_cfg_in[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	config->dev_node->priv = dev;
	config->make_thread(dev);
	LOG_INF("Device %p (max. speed %d) belongs to %s",
		dev, config->speed_idx, config->uhc_name);

	return 0;
}

static void udc_vrt_lock(const struct device *dev)
{
	udc_lock_internal(dev, K_FOREVER);
}

static void udc_vrt_unlock(const struct device *dev)
{
	udc_unlock_internal(dev);
}

static const struct udc_api udc_vrt_api = {
	.lock = udc_vrt_lock,
	.unlock = udc_vrt_unlock,
	.device_speed = udc_vrt_device_speed,
	.init = udc_vrt_init,
	.enable = udc_vrt_enable,
	.disable = udc_vrt_disable,
	.shutdown = udc_vrt_shutdown,
	.set_address = udc_vrt_set_address,
	.host_wakeup = udc_vrt_host_wakeup,
	.ep_enable = udc_vrt_ep_enable,
	.ep_disable = udc_vrt_ep_disable,
	.ep_set_halt = udc_vrt_ep_set_halt,
	.ep_clear_halt = udc_vrt_ep_clear_halt,
	.ep_enqueue = udc_vrt_ep_enqueue,
	.ep_dequeue = udc_vrt_ep_dequeue,
};

#define DT_DRV_COMPAT zephyr_udc_virtual

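/*
 * Per-instance definition: a dedicated handler thread, endpoint
 * configuration arrays, the UVB device node, and the UDC device itself.
 */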
#define UDC_VRT_DEVICE_DEFINE(n)						\
	K_THREAD_STACK_DEFINE(udc_vrt_stack_area_##n,				\
			      CONFIG_UDC_VIRTUAL_STACK_SIZE);			\
										\
	static void udc_vrt_thread_##n(void *dev, void *unused1, void *unused2)\
	{									\
		while (1) {							\
			udc_vrt_thread_handler(dev);				\
		}								\
	}									\
										\
	static void udc_vrt_make_thread_##n(const struct device *dev)		\
	{									\
		struct udc_vrt_data *priv = udc_get_private(dev);		\
										\
		k_thread_create(&priv->thread_data,				\
			    udc_vrt_stack_area_##n,				\
			    K_THREAD_STACK_SIZEOF(udc_vrt_stack_area_##n),	\
			    udc_vrt_thread_##n,					\
			    (void *)dev, NULL, NULL,				\
			    K_PRIO_COOP(CONFIG_UDC_VIRTUAL_THREAD_PRIORITY),	\
			    K_ESSENTIAL,					\
			    K_NO_WAIT);						\
		k_thread_name_set(&priv->thread_data, dev->name);		\
	}									\
										\
	static struct udc_ep_config						\
		ep_cfg_out_##n[DT_INST_PROP(n, num_bidir_endpoints)];		\
	static struct udc_ep_config						\
		ep_cfg_in_##n[DT_INST_PROP(n, num_bidir_endpoints)];		\
										\
	static struct uvb_node udc_vrt_dev_node##n = {				\
		.name = DT_NODE_FULL_NAME(DT_DRV_INST(n)),			\
		.notify = udc_vrt_uvb_cb,					\
	};									\
										\
	static const struct udc_vrt_config udc_vrt_config_##n = {		\
		.num_of_eps = DT_INST_PROP(n, num_bidir_endpoints),		\
		.ep_cfg_in = ep_cfg_in_##n,					\
		.ep_cfg_out = ep_cfg_out_##n,					\
		.make_thread = udc_vrt_make_thread_##n,				\
		.dev_node = &udc_vrt_dev_node##n,				\
		.speed_idx = DT_ENUM_IDX(DT_DRV_INST(n), maximum_speed),	\
		.uhc_name = DT_NODE_FULL_NAME(DT_INST_PARENT(n)),		\
	};									\
										\
	static struct udc_vrt_data udc_priv_##n = {				\
	};									\
										\
	static struct udc_data udc_data_##n = {					\
		.mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex),		\
		.priv = &udc_priv_##n,						\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n, udc_vrt_driver_preinit, NULL,			\
			      &udc_data_##n, &udc_vrt_config_##n,		\
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,	\
			      &udc_vrt_api);

DT_INST_FOREACH_STATUS_OKAY(UDC_VRT_DEVICE_DEFINE)