1 /*
2  * Copyright (c) 2021-2022 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/init.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/net_buf.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/sys/__assert.h>
12 #include <zephyr/usb/usb_ch9.h>
13 #include <zephyr/drivers/usb/udc_buf.h>
14 #include "udc_common.h"
15 
16 #include <zephyr/logging/log.h>
/* Fall back to LOG_LEVEL_NONE when the driver log level Kconfig symbol
 * is not defined, so the module builds in minimal configurations.
 */
#if defined(CONFIG_UDC_DRIVER_LOG_LEVEL)
#define UDC_COMMON_LOG_LEVEL CONFIG_UDC_DRIVER_LOG_LEVEL
#else
#define UDC_COMMON_LOG_LEVEL LOG_LEVEL_NONE
#endif
/* Register with the guarded macro, not CONFIG_UDC_DRIVER_LOG_LEVEL directly;
 * otherwise the fallback above is dead and the build fails when the symbol
 * is absent.
 */
LOG_MODULE_REGISTER(udc, UDC_COMMON_LOG_LEVEL);
23 
udc_pool_data_alloc(struct net_buf * const buf,size_t * const size,k_timeout_t timeout)24 static inline uint8_t *udc_pool_data_alloc(struct net_buf *const buf,
25 					   size_t *const size, k_timeout_t timeout)
26 {
27 	struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id);
28 	struct k_heap *const pool = buf_pool->alloc->alloc_data;
29 	void *b;
30 
31 	*size = ROUND_UP(*size, UDC_BUF_GRANULARITY);
32 	b = k_heap_aligned_alloc(pool, UDC_BUF_ALIGN, *size, timeout);
33 	if (b == NULL) {
34 		*size = 0;
35 		return NULL;
36 	}
37 
38 	return b;
39 }
40 
/* Return payload memory previously obtained via udc_pool_data_alloc()
 * to the k_heap backing the buffer's pool.
 */
static inline void udc_pool_data_unref(struct net_buf *buf, uint8_t *const data)
{
	struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id);

	k_heap_free(buf_pool->alloc->alloc_data, data);
}
48 
/* Allocator callbacks for UDC net_buf pools whose payload must come from
 * a DMA-capable k_heap (see udc_pool_data_alloc()/udc_pool_data_unref()).
 */
const struct net_buf_data_cb net_buf_dma_cb = {
	.alloc = udc_pool_data_alloc,
	.unref = udc_pool_data_unref,
};
53 
static inline void udc_buf_destroy(struct net_buf *buf);

/* Shared pool for endpoint transfer buffers; each buffer carries a
 * struct udc_buf_info as user data and is destroyed via udc_buf_destroy().
 */
UDC_BUF_POOL_VAR_DEFINE(udc_ep_pool,
			CONFIG_UDC_BUF_COUNT, CONFIG_UDC_BUF_POOL_SIZE,
			sizeof(struct udc_buf_info), udc_buf_destroy);

/* Map an endpoint address to an index in the endpoint lookup table:
 * OUT endpoints use indices 0..15, IN endpoints 16..31.
 */
#define USB_EP_LUT_IDX(ep) (USB_EP_DIR_IS_IN(ep) ? (ep & BIT_MASK(4)) + 16 : \
						   ep & BIT_MASK(4))
62 
udc_set_suspended(const struct device * dev,const bool value)63 void udc_set_suspended(const struct device *dev, const bool value)
64 {
65 	struct udc_data *data = dev->data;
66 
67 	if (value == udc_is_suspended(dev)) {
68 		LOG_WRN("Spurious %s event", value ? "suspend" : "resume");
69 	}
70 
71 	atomic_set_bit_to(&data->status, UDC_STATUS_SUSPENDED, value);
72 }
73 
udc_get_ep_cfg(const struct device * dev,const uint8_t ep)74 struct udc_ep_config *udc_get_ep_cfg(const struct device *dev, const uint8_t ep)
75 {
76 	struct udc_data *data = dev->data;
77 
78 	return data->ep_lut[USB_EP_LUT_IDX(ep)];
79 }
80 
udc_ep_is_busy(const struct udc_ep_config * const ep_cfg)81 bool udc_ep_is_busy(const struct udc_ep_config *const ep_cfg)
82 {
83 	return ep_cfg->stat.busy;
84 }
85 
udc_ep_set_busy(struct udc_ep_config * const ep_cfg,const bool busy)86 void udc_ep_set_busy(struct udc_ep_config *const ep_cfg, const bool busy)
87 {
88 	ep_cfg->stat.busy = busy;
89 }
90 
udc_register_ep(const struct device * dev,struct udc_ep_config * const cfg)91 int udc_register_ep(const struct device *dev, struct udc_ep_config *const cfg)
92 {
93 	struct udc_data *data = dev->data;
94 	uint8_t idx;
95 
96 	if (udc_is_initialized(dev)) {
97 		return -EACCES;
98 	}
99 
100 	idx = USB_EP_LUT_IDX(cfg->addr);
101 	__ASSERT_NO_MSG(idx < ARRAY_SIZE(data->ep_lut));
102 
103 	data->ep_lut[idx] = cfg;
104 	k_fifo_init(&cfg->fifo);
105 
106 	return 0;
107 }
108 
udc_buf_get(struct udc_ep_config * const ep_cfg)109 struct net_buf *udc_buf_get(struct udc_ep_config *const ep_cfg)
110 {
111 	return k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
112 }
113 
/* Drain the endpoint FIFO completely and chain all dequeued buffers into
 * a single fragment list headed by the first buffer.
 *
 * Returns the head of the chain, or NULL if the FIFO was already empty.
 */
struct net_buf *udc_buf_get_all(struct udc_ep_config *const ep_cfg)
{
	struct net_buf *buf;

	buf = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
	if (!buf) {
		return NULL;
	}

	LOG_DBG("ep 0x%02x dequeue %p", ep_cfg->addr, buf);
	/* Attach each subsequently dequeued buffer as the frags link of the
	 * previous one; stop when the FIFO drains or a get returns NULL.
	 */
	for (struct net_buf *n = buf; !k_fifo_is_empty(&ep_cfg->fifo); n = n->frags) {
		n->frags = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
		LOG_DBG("|-> %p ", n->frags);
		if (n->frags == NULL) {
			break;
		}
	}

	return buf;
}
134 
udc_buf_peek(struct udc_ep_config * const ep_cfg)135 struct net_buf *udc_buf_peek(struct udc_ep_config *const ep_cfg)
136 {
137 	return k_fifo_peek_head(&ep_cfg->fifo);
138 }
139 
udc_buf_put(struct udc_ep_config * const ep_cfg,struct net_buf * const buf)140 void udc_buf_put(struct udc_ep_config *const ep_cfg,
141 		 struct net_buf *const buf)
142 {
143 	k_fifo_put(&ep_cfg->fifo, buf);
144 }
145 
udc_ep_buf_set_setup(struct net_buf * const buf)146 void udc_ep_buf_set_setup(struct net_buf *const buf)
147 {
148 	struct udc_buf_info *bi = udc_get_buf_info(buf);
149 
150 	bi->setup = 1;
151 	bi->data = 0;
152 	bi->status = 0;
153 }
154 
udc_ep_buf_has_zlp(const struct net_buf * const buf)155 bool udc_ep_buf_has_zlp(const struct net_buf *const buf)
156 {
157 	const struct udc_buf_info *bi = udc_get_buf_info(buf);
158 
159 	return bi->zlp;
160 }
161 
udc_ep_buf_clear_zlp(const struct net_buf * const buf)162 void udc_ep_buf_clear_zlp(const struct net_buf *const buf)
163 {
164 	struct udc_buf_info *bi = udc_get_buf_info(buf);
165 
166 	bi->zlp = false;
167 }
168 
udc_submit_event(const struct device * dev,const enum udc_event_type type,const int status)169 int udc_submit_event(const struct device *dev,
170 		     const enum udc_event_type type,
171 		     const int status)
172 {
173 	struct udc_data *data = dev->data;
174 	struct udc_event drv_evt = {
175 		.type = type,
176 		.status = status,
177 		.dev = dev,
178 	};
179 
180 	return data->event_cb(dev, &drv_evt);
181 }
182 
udc_submit_ep_event(const struct device * dev,struct net_buf * const buf,const int err)183 int udc_submit_ep_event(const struct device *dev,
184 			struct net_buf *const buf,
185 			const int err)
186 {
187 	struct udc_buf_info *bi = udc_get_buf_info(buf);
188 	struct udc_data *data = dev->data;
189 	const struct udc_event drv_evt = {
190 		.type = UDC_EVT_EP_REQUEST,
191 		.buf = buf,
192 		.dev = dev,
193 	};
194 
195 	if (!udc_is_initialized(dev)) {
196 		return -EPERM;
197 	}
198 
199 	bi->err = err;
200 
201 	return data->event_cb(dev, &drv_evt);
202 }
203 
/* Extract the transfer type (control/iso/bulk/interrupt) from the
 * bmAttributes field of an endpoint descriptor.
 */
static uint8_t ep_attrib_get_transfer(uint8_t attributes)
{
	const uint8_t type = attributes & USB_EP_TRANSFER_TYPE_MASK;

	return type;
}
208 
/* Validate a requested endpoint configuration against the capabilities
 * of the hardware endpoint @p cfg.
 *
 * Checks direction, maximum packet size, transfer type, and (for
 * interrupt/iso) high-bandwidth support. Returns true when the
 * configuration is acceptable. @p dev and @p interval are currently
 * unused in the checks.
 */
static bool ep_check_config(const struct device *dev,
			    const struct udc_ep_config *const cfg,
			    const uint8_t ep,
			    const uint8_t attributes,
			    const uint16_t mps,
			    const uint8_t interval)
{
	bool dir_is_in = USB_EP_DIR_IS_IN(ep);
	bool dir_is_out = USB_EP_DIR_IS_OUT(ep);

	LOG_DBG("cfg d:%c|%c t:%c|%c|%c|%c, mps %u",
		cfg->caps.in ? 'I' : '-',
		cfg->caps.out ? 'O' : '-',
		cfg->caps.iso ? 'S' : '-',
		cfg->caps.bulk ? 'B' : '-',
		cfg->caps.interrupt ? 'I' : '-',
		cfg->caps.control ? 'C' : '-',
		cfg->caps.mps);

	/* Requested direction must be supported by the endpoint. */
	if (dir_is_out && !cfg->caps.out) {
		return false;
	}

	if (dir_is_in && !cfg->caps.in) {
		return false;
	}

	/* Compare packet sizes only, ignoring additional-transaction bits. */
	if (USB_MPS_EP_SIZE(mps) > USB_MPS_EP_SIZE(cfg->caps.mps)) {
		return false;
	}

	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		if (!cfg->caps.bulk) {
			return false;
		}
		break;
	case USB_EP_TYPE_INTERRUPT:
		/* Additional transactions per microframe require
		 * high-bandwidth endpoint support.
		 */
		if (!cfg->caps.interrupt ||
		    (USB_MPS_ADDITIONAL_TRANSACTIONS(mps) &&
		     !cfg->caps.high_bandwidth)) {
			return false;
		}
		break;
	case USB_EP_TYPE_ISO:
		if (!cfg->caps.iso ||
		    (USB_MPS_ADDITIONAL_TRANSACTIONS(mps) &&
		     !cfg->caps.high_bandwidth)) {
			return false;
		}
		break;
	case USB_EP_TYPE_CONTROL:
		if (!cfg->caps.control) {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}
271 
/* Fill in a default maximum packet size for bulk and interrupt endpoints,
 * clamped to both the endpoint capability and the USB spec limit for the
 * controller's speed. Control and iso endpoints are left untouched.
 */
static void ep_update_mps(const struct device *dev,
			  const struct udc_ep_config *const cfg,
			  const uint8_t attributes,
			  uint16_t *const mps)
{
	const struct udc_device_caps caps = udc_caps(dev);
	const uint8_t type = ep_attrib_get_transfer(attributes);

	/*
	 * TODO: It does not take into account the actual speed of the
	 * bus after the RESET. Should be fixed/improved when the driver
	 * for high speed controller are ported.
	 */
	if (type == USB_EP_TYPE_BULK) {
		*mps = MIN(cfg->caps.mps, caps.hs ? 512 : 64);
	} else if (type == USB_EP_TYPE_INTERRUPT) {
		*mps = MIN(cfg->caps.mps, caps.hs ? 1024 : 64);
	}
}
301 
/* Check whether an endpoint can be configured as requested.
 *
 * When the check passes and *mps is 0, a default maximum packet size is
 * written back through @p mps. Returns 0 on success, -ENODEV when the
 * endpoint does not exist, -ENOTSUP when the configuration is rejected.
 */
int udc_ep_try_config(const struct device *dev,
		      const uint8_t ep,
		      const uint8_t attributes,
		      uint16_t *const mps,
		      const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *const cfg = udc_get_ep_cfg(dev, ep);
	bool valid;

	if (cfg == NULL) {
		return -ENODEV;
	}

	api->lock(dev);

	valid = ep_check_config(dev, cfg, ep, attributes, *mps, interval);
	if (valid && *mps == 0U) {
		ep_update_mps(dev, cfg, attributes, mps);
	}

	api->unlock(dev);

	return valid ? 0 : -ENOTSUP;
}
328 
/* Validate and enable an endpoint; also used by drivers for the control
 * endpoints. Caller is expected to hold the controller lock.
 *
 * Returns -ENODEV if the endpoint is unknown or the configuration is
 * invalid, -EALREADY if it is already enabled, otherwise the driver's
 * ep_enable() result.
 */
int udc_ep_enable_internal(const struct device *dev,
			   const uint8_t ep,
			   const uint8_t attributes,
			   const uint16_t mps,
			   const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	if (cfg->stat.enabled) {
		LOG_ERR("ep 0x%02x already enabled", cfg->addr);
		return -EALREADY;
	}

	if (!ep_check_config(dev, cfg, ep, attributes, mps, interval)) {
		LOG_ERR("Endpoint 0x%02x validation failed", cfg->addr);
		return -ENODEV;
	}

	cfg->attributes = attributes;
	cfg->mps = mps;
	cfg->interval = interval;

	/* Reset per-endpoint transfer state before handing to the driver. */
	cfg->stat.odd = 0;
	cfg->stat.halted = 0;
	cfg->stat.data1 = false;
	ret = api->ep_enable(dev, cfg);
	/* Only mark enabled when the driver accepted the endpoint. */
	cfg->stat.enabled = ret ? false : true;

	return ret;
}
366 
udc_ep_enable(const struct device * dev,const uint8_t ep,const uint8_t attributes,const uint16_t mps,const uint8_t interval)367 int udc_ep_enable(const struct device *dev,
368 		  const uint8_t ep,
369 		  const uint8_t attributes,
370 		  const uint16_t mps,
371 		  const uint8_t interval)
372 {
373 	const struct udc_api *api = dev->api;
374 	int ret;
375 
376 	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
377 		return -EINVAL;
378 	}
379 
380 	api->lock(dev);
381 
382 	if (!udc_is_enabled(dev)) {
383 		ret = -EPERM;
384 		goto ep_enable_error;
385 	}
386 
387 	ret = udc_ep_enable_internal(dev, ep, attributes, mps, interval);
388 
389 ep_enable_error:
390 	api->unlock(dev);
391 
392 	return ret;
393 }
394 
/* Disable an endpoint; also used by drivers for the control endpoints.
 * Caller is expected to hold the controller lock.
 *
 * Returns -ENODEV if the endpoint is unknown, -EALREADY if it is already
 * disabled, otherwise the driver's ep_disable() result.
 */
int udc_ep_disable_internal(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	if (!cfg->stat.enabled) {
		LOG_ERR("ep 0x%02x already disabled", cfg->addr);
		return -EALREADY;
	}

	ret = api->ep_disable(dev, cfg);
	/* Keep the enabled flag set if the driver failed to disable. */
	cfg->stat.enabled = ret ? cfg->stat.enabled : false;

	return ret;
}
416 
udc_ep_disable(const struct device * dev,const uint8_t ep)417 int udc_ep_disable(const struct device *dev, const uint8_t ep)
418 {
419 	const struct udc_api *api = dev->api;
420 	int ret;
421 
422 	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
423 		return -EINVAL;
424 	}
425 
426 	api->lock(dev);
427 
428 	if (!udc_is_initialized(dev)) {
429 		ret = -EPERM;
430 		goto ep_disable_error;
431 	}
432 
433 	ret = udc_ep_disable_internal(dev, ep);
434 
435 ep_disable_error:
436 	api->unlock(dev);
437 
438 	return ret;
439 }
440 
/* Halt (stall) an endpoint.
 *
 * Returns -EPERM when the controller is not enabled, -ENODEV when the
 * endpoint is unknown or disabled, -ENOTSUP for isochronous endpoints
 * (which have no halt feature), otherwise the driver's ep_set_halt()
 * result.
 */
int udc_ep_set_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_set_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	/* Isochronous endpoints do not support the halt feature. */
	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_set_halt_error;
	}

	ret = api->ep_set_halt(dev, cfg);

ep_set_halt_error:
	api->unlock(dev);

	return ret;
}
477 
/* Clear the halt (stall) condition on an endpoint.
 *
 * Returns -EPERM when the controller is not enabled, -ENODEV when the
 * endpoint is unknown or disabled, -ENOTSUP for isochronous endpoints,
 * otherwise the driver's ep_clear_halt() result. The endpoint's halted
 * flag is cleared only on success.
 */
int udc_ep_clear_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_clear_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	/* Isochronous endpoints do not support the halt feature. */
	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_clear_halt_error;
	}

	ret = api->ep_clear_halt(dev, cfg);
	if (ret == 0) {
		cfg->stat.halted = false;
	}

ep_clear_halt_error:
	api->unlock(dev);

	return ret;
}
517 
udc_debug_ep_enqueue(const struct device * dev,struct udc_ep_config * const cfg)518 static void udc_debug_ep_enqueue(const struct device *dev,
519 				 struct udc_ep_config *const cfg)
520 {
521 	struct udc_buf_info *bi;
522 	struct net_buf *buf;
523 	sys_slist_t list;
524 
525 	list.head = k_fifo_peek_head(&cfg->fifo);
526 	list.tail = k_fifo_peek_tail(&cfg->fifo);
527 	if (list.head == NULL) {
528 		LOG_DBG("ep 0x%02x queue is empty", cfg->addr);
529 		return;
530 	}
531 
532 	LOG_DBG("[de]queue ep 0x%02x:", cfg->addr);
533 
534 	SYS_SLIST_FOR_EACH_CONTAINER(&list, buf, node) {
535 		bi = udc_get_buf_info(buf);
536 		LOG_DBG("|-> %p (%u) ->", buf, buf->size);
537 	}
538 }
539 
/* Public API: queue a transfer buffer on the endpoint recorded in the
 * buffer's udc_buf_info.
 *
 * Returns -EPERM when the controller is not enabled or the target is the
 * control OUT endpoint (driver-managed), -ENODEV when the endpoint is
 * unknown or disabled, otherwise the driver's ep_enqueue() result.
 */
int udc_ep_enqueue(const struct device *dev, struct net_buf *const buf)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	struct udc_buf_info *bi;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	bi = udc_get_buf_info(buf);
	/* Control OUT transfers are initiated by the driver, not the stack. */
	if (bi->ep == USB_CONTROL_EP_OUT) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	cfg = udc_get_ep_cfg(dev, bi->ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_enqueue_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_enqueue_error;
	}

	LOG_DBG("Queue ep 0x%02x %p len %u", cfg->addr, buf,
		USB_EP_DIR_IS_IN(cfg->addr) ? buf->len : buf->size);

	bi->setup = 0;
	ret = api->ep_enqueue(dev, cfg, buf);

ep_enqueue_error:
	api->unlock(dev);

	return ret;
}
582 
/* Public API: cancel all queued transfers on an endpoint.
 *
 * The endpoint should be halted or disabled first; if not, only an
 * informational message is logged. Returns -EPERM when the controller is
 * not initialized, -ENODEV when the endpoint is unknown, 0 when the FIFO
 * is already empty, otherwise the driver's ep_dequeue() result.
 */
int udc_ep_dequeue(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_dequeue_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_dequeue_error;
	}

	if (cfg->stat.enabled || cfg->stat.halted) {
		LOG_INF("ep 0x%02x is not halted|disabled", cfg->addr);
	}

	if (UDC_COMMON_LOG_LEVEL == LOG_LEVEL_DBG) {
		udc_debug_ep_enqueue(dev, cfg);
	}

	if (k_fifo_is_empty(&cfg->fifo)) {
		ret = 0;
	} else  {
		ret = api->ep_dequeue(dev, cfg);
	}

ep_dequeue_error:
	api->unlock(dev);

	return ret;
}
621 
udc_ep_buf_alloc(const struct device * dev,const uint8_t ep,const size_t size)622 struct net_buf *udc_ep_buf_alloc(const struct device *dev,
623 				 const uint8_t ep,
624 				 const size_t size)
625 {
626 	const struct udc_api *api = dev->api;
627 	struct net_buf *buf = NULL;
628 	struct udc_buf_info *bi;
629 
630 	api->lock(dev);
631 
632 	buf = net_buf_alloc_len(&udc_ep_pool, size, K_NO_WAIT);
633 	if (!buf) {
634 		LOG_ERR("Failed to allocate net_buf %zd", size);
635 		goto ep_alloc_error;
636 	}
637 
638 	bi = udc_get_buf_info(buf);
639 	bi->ep = ep;
640 	LOG_DBG("Allocate net_buf, ep 0x%02x, size %zd", ep, size);
641 
642 ep_alloc_error:
643 	api->unlock(dev);
644 
645 	return buf;
646 }
647 
/* Allocate a buffer for a control transfer stage.
 *
 * Currently identical to udc_ep_buf_alloc(); kept separate so control
 * buffers can later come from a dedicated pool.
 */
struct net_buf *udc_ctrl_alloc(const struct device *dev,
			       const uint8_t ep,
			       const size_t size)
{
	/* TODO: for now just pass to udc_buf_alloc() */
	return udc_ep_buf_alloc(dev, ep, size);
}
655 
/* Pool destroy callback for udc_ep_pool; logs and releases the buffer. */
static inline void udc_buf_destroy(struct net_buf *buf)
{
	/* Adjust level and use together with the log in udc_ep_buf_alloc() */
	LOG_DBG("destroy %p", buf);
	net_buf_destroy(buf);
}
662 
udc_ep_buf_free(const struct device * dev,struct net_buf * const buf)663 int udc_ep_buf_free(const struct device *dev, struct net_buf *const buf)
664 {
665 	const struct udc_api *api = dev->api;
666 	int ret = 0;
667 
668 	api->lock(dev);
669 	net_buf_unref(buf);
670 	api->unlock(dev);
671 
672 	return ret;
673 }
674 
udc_device_speed(const struct device * dev)675 enum udc_bus_speed udc_device_speed(const struct device *dev)
676 {
677 	const struct udc_api *api = dev->api;
678 	enum udc_bus_speed speed = UDC_BUS_UNKNOWN;
679 
680 	api->lock(dev);
681 
682 	if (!udc_is_enabled(dev)) {
683 		goto device_speed_error;
684 	}
685 
686 	if (api->device_speed) {
687 		speed = api->device_speed(dev);
688 	} else {
689 		/* TODO: Shall we track connected status in UDC? */
690 		speed = UDC_BUS_SPEED_FS;
691 	}
692 
693 device_speed_error:
694 	api->unlock(dev);
695 
696 	return speed;
697 }
698 
/* Public API: enable the controller (make it visible on the bus).
 *
 * Resets the control pipe state machine to the SETUP stage. Returns
 * -EPERM when not initialized, -EALREADY when already enabled, otherwise
 * the driver's enable() result. The ENABLED status bit is set only on
 * success.
 */
int udc_enable(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto udc_enable_error;
	}

	if (udc_is_enabled(dev)) {
		ret = -EALREADY;
		goto udc_enable_error;
	}

	data->stage = CTRL_PIPE_STAGE_SETUP;

	ret = api->enable(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UDC_STATUS_ENABLED);
	}

udc_enable_error:
	api->unlock(dev);

	return ret;
}
729 
/* Public API: disable the controller.
 *
 * Returns -EALREADY when not enabled, otherwise the driver's disable()
 * result. Note the ENABLED status bit is cleared even if the driver's
 * disable() fails.
 */
int udc_disable(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EALREADY;
		goto udc_disable_error;
	}

	ret = api->disable(dev);
	atomic_clear_bit(&data->status, UDC_STATUS_ENABLED);

udc_disable_error:
	api->unlock(dev);

	return ret;
}
751 
/* Public API: initialize the controller and register the event callback.
 *
 * Both @p event_cb and @p event_ctx are mandatory (-EINVAL otherwise).
 * Returns -EALREADY when already initialized, otherwise the driver's
 * init() result. The INITIALIZED status bit is set only on success.
 */
int udc_init(const struct device *dev,
	     udc_event_cb_t event_cb, const void *const event_ctx)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	if (event_cb == NULL || event_ctx == NULL) {
		return -EINVAL;
	}

	api->lock(dev);

	if (udc_is_initialized(dev)) {
		ret = -EALREADY;
		goto udc_init_error;
	}

	data->event_cb = event_cb;
	data->event_ctx = event_ctx;

	ret = api->init(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UDC_STATUS_INITIALIZED);
	}

udc_init_error:
	api->unlock(dev);

	return ret;
}
783 
/* Public API: shut the controller down.
 *
 * The controller must be disabled first (-EBUSY otherwise) and must be
 * initialized (-EALREADY otherwise). Note the INITIALIZED status bit is
 * cleared even if the driver's shutdown() fails.
 */
int udc_shutdown(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (udc_is_enabled(dev)) {
		ret = -EBUSY;
		goto udc_shutdown_error;
	}

	if (!udc_is_initialized(dev)) {
		ret = -EALREADY;
		goto udc_shutdown_error;
	}

	ret = api->shutdown(dev);
	atomic_clear_bit(&data->status, UDC_STATUS_INITIALIZED);

udc_shutdown_error:
	api->unlock(dev);

	return ret;
}
810 
811 static ALWAYS_INLINE
udc_ctrl_alloc_stage(const struct device * dev,struct net_buf * const parent,const uint8_t ep,const size_t size)812 struct net_buf *udc_ctrl_alloc_stage(const struct device *dev,
813 				     struct net_buf *const parent,
814 				     const uint8_t ep,
815 				     const size_t size)
816 {
817 	struct net_buf *buf;
818 
819 	buf = udc_ctrl_alloc(dev, ep, size);
820 	if (buf == NULL) {
821 		return NULL;
822 	}
823 
824 	if (parent) {
825 		net_buf_frag_add(parent, buf);
826 	}
827 
828 	return buf;
829 }
830 
udc_ctrl_alloc_data(const struct device * dev,struct net_buf * const setup,const uint8_t ep)831 static struct net_buf *udc_ctrl_alloc_data(const struct device *dev,
832 					   struct net_buf *const setup,
833 					   const uint8_t ep)
834 {
835 	size_t size = udc_data_stage_length(setup);
836 	struct udc_buf_info *bi;
837 	struct net_buf *buf;
838 
839 	buf = udc_ctrl_alloc_stage(dev, setup, ep, size);
840 	if (buf) {
841 		bi = udc_get_buf_info(buf);
842 		bi->data = true;
843 	}
844 
845 	return buf;
846 }
847 
udc_ctrl_alloc_status(const struct device * dev,struct net_buf * const parent,const uint8_t ep)848 static struct net_buf *udc_ctrl_alloc_status(const struct device *dev,
849 					     struct net_buf *const parent,
850 					     const uint8_t ep)
851 {
852 	size_t size = (ep == USB_CONTROL_EP_OUT) ? 64 : 0;
853 	struct udc_buf_info *bi;
854 	struct net_buf *buf;
855 
856 	buf = udc_ctrl_alloc_stage(dev, parent, ep, size);
857 	if (buf) {
858 		bi = udc_get_buf_info(buf);
859 		bi->status = true;
860 	}
861 
862 	return buf;
863 }
864 
/* Complete a setup->out sequence: chain the received data-OUT buffer to
 * the stored setup buffer, append a status IN buffer, and hand the whole
 * chain to the stack via an endpoint event.
 *
 * If the status buffer cannot be allocated, the event is submitted with
 * -ENOMEM so the stack can react.
 */
int udc_ctrl_submit_s_out_status(const struct device *dev,
			      struct net_buf *const dout)
{
	struct udc_buf_info *bi = udc_get_buf_info(dout);
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	bi->data = true;
	net_buf_frag_add(data->setup, dout);

	buf = udc_ctrl_alloc_status(dev, dout, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
883 
/* Start the data-IN stage of a setup->in sequence: allocate the data IN
 * buffer (chained to the stored setup buffer) and submit the chain to
 * the stack.
 *
 * Returns -ENOTSUP if the control pipe is not in the data-IN stage; on
 * allocation failure the event carries -ENOMEM.
 */
int udc_ctrl_submit_s_in_status(const struct device *dev)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	if (!udc_ctrl_stage_is_data_in(dev)) {
		return -ENOTSUP;
	}

	/* Allocate buffer for data stage IN */
	buf = udc_ctrl_alloc_data(dev, data->setup, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
902 
udc_ctrl_submit_s_status(const struct device * dev)903 int udc_ctrl_submit_s_status(const struct device *dev)
904 {
905 	struct udc_data *data = dev->data;
906 	struct net_buf *buf;
907 	int ret = 0;
908 
909 	/* Allocate buffer for possible status IN */
910 	buf = udc_ctrl_alloc_status(dev, data->setup, USB_CONTROL_EP_IN);
911 	if (buf == NULL) {
912 		ret = -ENOMEM;
913 	}
914 
915 	return udc_submit_ep_event(dev, data->setup, ret);
916 }
917 
udc_ctrl_submit_status(const struct device * dev,struct net_buf * const buf)918 int udc_ctrl_submit_status(const struct device *dev,
919 			   struct net_buf *const buf)
920 {
921 	struct udc_buf_info *bi = udc_get_buf_info(buf);
922 
923 	bi->status = true;
924 
925 	return udc_submit_ep_event(dev, buf, 0);
926 }
927 
udc_ctrl_stage_is_data_out(const struct device * dev)928 bool udc_ctrl_stage_is_data_out(const struct device *dev)
929 {
930 	struct udc_data *data = dev->data;
931 
932 	return data->stage == CTRL_PIPE_STAGE_DATA_OUT ? true : false;
933 }
934 
udc_ctrl_stage_is_data_in(const struct device * dev)935 bool udc_ctrl_stage_is_data_in(const struct device *dev)
936 {
937 	struct udc_data *data = dev->data;
938 
939 	return data->stage == CTRL_PIPE_STAGE_DATA_IN ? true : false;
940 }
941 
udc_ctrl_stage_is_status_out(const struct device * dev)942 bool udc_ctrl_stage_is_status_out(const struct device *dev)
943 {
944 	struct udc_data *data = dev->data;
945 
946 	return data->stage == CTRL_PIPE_STAGE_STATUS_OUT ? true : false;
947 }
948 
udc_ctrl_stage_is_status_in(const struct device * dev)949 bool udc_ctrl_stage_is_status_in(const struct device *dev)
950 {
951 	struct udc_data *data = dev->data;
952 
953 	return data->stage == CTRL_PIPE_STAGE_STATUS_IN ? true : false;
954 }
955 
udc_ctrl_stage_is_no_data(const struct device * dev)956 bool udc_ctrl_stage_is_no_data(const struct device *dev)
957 {
958 	struct udc_data *data = dev->data;
959 
960 	return data->stage == CTRL_PIPE_STAGE_NO_DATA ? true : false;
961 }
962 
udc_data_stage_to_host(const struct net_buf * const buf)963 static bool udc_data_stage_to_host(const struct net_buf *const buf)
964 {
965 	struct usb_setup_packet *setup = (void *)buf->data;
966 
967 	return USB_REQTYPE_GET_DIR(setup->bmRequestType);
968 }
969 
/* Advance the control pipe stage machine after a packet on endpoint 0.
 *
 * Called by drivers for every completed control endpoint buffer. Based
 * on the buffer's flags (setup/data/status), its direction, and the
 * current stage, the next stage is selected; inconsistent sequences end
 * in CTRL_PIPE_STAGE_ERROR.
 */
void udc_ctrl_update_stage(const struct device *dev,
			   struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);
	struct udc_device_caps caps = udc_caps(dev);
	uint8_t next_stage = CTRL_PIPE_STAGE_ERROR;
	struct udc_data *data = dev->data;

	__ASSERT(USB_EP_GET_IDX(bi->ep) == 0,
		 "0x%02x is not a control endpoint", bi->ep);

	if (bi->setup && bi->ep == USB_CONTROL_EP_OUT) {
		uint16_t length  = udc_data_stage_length(buf);

		if (data->stage != CTRL_PIPE_STAGE_SETUP) {
			LOG_INF("Sequence %u not completed", data->stage);

			if (data->stage == CTRL_PIPE_STAGE_DATA_OUT) {
				/*
				 * The last setup packet is "floating" because
				 * DATA OUT stage was awaited. This setup
				 * packet must be removed here because it will
				 * never reach the stack.
				 */
				LOG_INF("Drop setup packet (%p)", (void *)data->setup);
				net_buf_unref(data->setup);
			}

			data->stage = CTRL_PIPE_STAGE_SETUP;
		}

		data->setup = buf;

		/*
		 * Setup Stage has been completed (setup packet received),
		 * regardless of the previous stage, this is now being reset.
		 * Next state depends on wLength and the direction bit (D7).
		 */
		if (length == 0) {
			/*
			 * No Data Stage, next is Status Stage
			 * complete sequence: s->status
			 */
			LOG_DBG("s->(status)");
			next_stage = CTRL_PIPE_STAGE_NO_DATA;
		} else if (udc_data_stage_to_host(buf)) {
			/*
			 * Next is Data Stage (to host / IN)
			 * complete sequence: s->in->status
			 */
			LOG_DBG("s->(in)");
			next_stage = CTRL_PIPE_STAGE_DATA_IN;
		} else {
			/*
			 * Next is Data Stage (to device / OUT)
			 * complete sequence: s->out->status
			 */
			LOG_DBG("s->(out)");
			next_stage = CTRL_PIPE_STAGE_DATA_OUT;
		}

	} else if (bi->ep == USB_CONTROL_EP_OUT) {
		if (data->stage == CTRL_PIPE_STAGE_DATA_OUT) {
			/*
			 * Next sequence is Status Stage if request is okay,
			 * (IN ZLP status to host)
			 */
			next_stage = CTRL_PIPE_STAGE_STATUS_IN;
		} else if (data->stage == CTRL_PIPE_STAGE_STATUS_OUT) {
			/*
			 * End of a sequence: s->in->status,
			 * We should check the length here because we always
			 * submit a OUT request with the minimum length
			 * of the control endpoint.
			 */
			if (buf->len == 0) {
				LOG_DBG("s-in-status");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_WRN("ZLP expected");
				next_stage = CTRL_PIPE_STAGE_ERROR;
			}
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}

	} else { /* if (bi->ep == USB_CONTROL_EP_IN) */
		if (data->stage == CTRL_PIPE_STAGE_STATUS_IN) {
			/*
			 * End of a sequence: setup->out->in
			 */
			LOG_DBG("s-out-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else if (data->stage == CTRL_PIPE_STAGE_DATA_IN) {
			/*
			 * Data IN stage completed, next sequence
			 * is Status Stage (OUT ZLP status to device).
			 * over-engineered controllers can send status
			 * on their own, skip this state then.
			 */
			if (caps.out_ack) {
				LOG_DBG("s-in->[status]");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_DBG("s-in->(status)");
				next_stage = CTRL_PIPE_STAGE_STATUS_OUT;
			}
		} else if (data->stage == CTRL_PIPE_STAGE_NO_DATA) {
			/*
			 * End of a sequence (setup->in)
			 * Previous NO Data stage was completed and
			 * we confirmed it with an IN ZLP.
			 */
			LOG_DBG("s-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}
	}


	if (next_stage == data->stage) {
		LOG_WRN("State not changed!");
	}

	data->stage = next_stage;
}
1099 
#if defined(CONFIG_UDC_WORKQUEUE)
/* Dedicated work queue for UDC drivers that must defer work out of
 * interrupt context, started at POST_KERNEL init level.
 */
K_KERNEL_STACK_DEFINE(udc_work_q_stack, CONFIG_UDC_WORKQUEUE_STACK_SIZE);

struct k_work_q udc_work_q;

/* SYS_INIT hook: start the UDC work queue and name its thread. */
static int udc_work_q_init(void)
{

	k_work_queue_start(&udc_work_q,
			   udc_work_q_stack,
			   K_KERNEL_STACK_SIZEOF(udc_work_q_stack),
			   CONFIG_UDC_WORKQUEUE_PRIORITY, NULL);
	k_thread_name_set(&udc_work_q.thread, "udc_work_q");

	return 0;
}

SYS_INIT(udc_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
1119