1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2022-08-22 Emuzit first version
9 */
10 #include <rthw.h>
11 #include <drivers/usb_common.h>
12 #include <drivers/usb_device.h>
13 #include "ch56x_usbhs.h"
14 #include "ch56x_sys.h"
15 #include "isr_sp.h"
16
17 /*--------------------------------------------------------*/
18 /* Warning : Not fully tested, use at your own discretion */
19 /*--------------------------------------------------------*/
20
#ifdef SOC_SERIES_CH569
/* CH565/CH569 : USB DMA buffers must live in the dedicated RAMX region
 * (.dmadata section) and be 16-byte aligned. */
#define _attr_uepdma __attribute__((section(".dmadata"), aligned(16)))
/* EP0 SETUP packets are DMA'd by hardware into the start of RAMX */
#define _ep0_setup_dmabuf _dmadata_start
#else
/* CH567/CH568 : DMA buffers only need 4-byte alignment */
#define _attr_uepdma __attribute__((aligned(4)))
#define _ep0_setup_dmabuf _dmadata_start
/* map the generic USBHS names onto the USB1 instance for CH567/CH568 */
#define usbhs_irq_handler usb1_irq_handler
#define USBHS_IRQn USB1_IRQn
#define USBHS_REG_BASE USB1_REG_BASE
#define RAMX_BASE_ADDRESS RAMS_BASE_ADDRESS
#define UEP0_RT_DMA UEP_DMA[0]
#endif

/* max packet sizes : EP0 control (64) and high-speed bulk/int (512) */
#define UEP_MPS_64 64
#define UEP_MPS_512 512

/* split a USB endpoint address into index (low bits) and direction bit */
#define _get_ep_idx(address) ((address) & USB_EPNO_MASK)
#define _get_ep_dir(address) ((address) & USB_DIR_MASK)

#define uep_dir_is_in(address) (_get_ep_dir(address) == USB_DIR_IN)
#define uep_dir_is_out(address) (_get_ep_dir(address) == USB_DIR_OUT)

/* start of the DMA-capable data region, provided by the linker script */
extern uint32_t _dmadata_start[];

/* proxy DMA buffers, used when the caller's buffer does not satisfy the
 * hardware placement/alignment requirements (see udc_ep_read_prepare). */
static uint32_t ep0_dmabuf[UEP_MPS_64 / 4] _attr_uepdma;
static uint32_t epx_dmabuf[UEP_ADDRESS_MAX][UEP_MPS_512 / 4] _attr_uepdma;
47
/* Endpoint pool advertised to the RT-Thread USB device stack.
 * EP0 is the bidirectional control endpoint (always assigned); the
 * remaining entries are claimable by class drivers. 512-byte MPS
 * matches high-speed bulk/interrupt. The 0xff entry terminates the list.
 */
static struct ep_id usbhs_ep_pool[] =
{
    {0x0, USB_EP_ATTR_CONTROL, USB_DIR_INOUT, 64, ID_ASSIGNED},
    {0x1, USB_EP_ATTR_BULK, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x1, USB_EP_ATTR_BULK, USB_DIR_OUT, 512, ID_UNASSIGNED},
    {0x2, USB_EP_ATTR_INT, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x2, USB_EP_ATTR_INT, USB_DIR_OUT, 512, ID_UNASSIGNED},
    {0x3, USB_EP_ATTR_BULK, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x3, USB_EP_ATTR_BULK, USB_DIR_OUT, 512, ID_UNASSIGNED},
#ifdef SOC_SERIES_CH569
    /* FIXME: not sure how to deal with EP4, no UEP4_DMA register */
    {0x4, USB_EP_ATTR_INT, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x4, USB_EP_ATTR_INT, USB_DIR_OUT, 512, ID_UNASSIGNED},
    {0x5, USB_EP_ATTR_BULK, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x5, USB_EP_ATTR_BULK, USB_DIR_OUT, 512, ID_UNASSIGNED},
    {0x6, USB_EP_ATTR_INT, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x6, USB_EP_ATTR_INT, USB_DIR_OUT, 512, ID_UNASSIGNED},
    {0x7, USB_EP_ATTR_BULK, USB_DIR_IN, 512, ID_UNASSIGNED},
    {0x7, USB_EP_ATTR_BULK, USB_DIR_OUT, 512, ID_UNASSIGNED},
#endif
    {0xff, USB_EP_ATTR_TYPE_MASK, USB_DIR_MASK, 0, ID_ASSIGNED},
};
70
/* the single USB device controller instance registered as "usbd" */
static struct udcd udc_device;

/* pending SET_ADDRESS value; bit7 set marks "write DEV_AD after the
 * status-stage IN token completes", 0 means nothing pending. */
static uint8_t setup_set_address;
74
udc_set_address(uint8_t address)75 static rt_err_t udc_set_address(uint8_t address)
76 {
77 /* DEV_AD should be updated after status stage IN token of SET_ADDRESS
78 * such that that IN token could still reach our device.
79 */
80 setup_set_address = address | 0x80;
81 return RT_EOK;
82 }
83
udc_set_config(uint8_t address)84 static rt_err_t udc_set_config(uint8_t address)
85 {
86 return RT_EOK;
87 }
88
udc_ep_set_stall(uint8_t address)89 static rt_err_t udc_ep_set_stall(uint8_t address)
90 {
91 volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;
92
93 uint8_t ep_idx = _get_ep_idx(address);
94
95 if (uep_dir_is_in(address))
96 usbhs->UEP_CTRL[ep_idx].TX_CTRL.res_mask = UEP_RES_STALL;
97 else
98 usbhs->UEP_CTRL[ep_idx].RX_CTRL.res_mask = UEP_RES_STALL;
99
100 return RT_EOK;
101 }
102
udc_ep_clear_stall(uint8_t address)103 static rt_err_t udc_ep_clear_stall(uint8_t address)
104 {
105 volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;
106
107 uint8_t ep_idx = _get_ep_idx(address);
108
109 if (uep_dir_is_in(address))
110 usbhs->UEP_CTRL[ep_idx].TX_CTRL.res_mask = UEP_RES_NAK;
111 else
112 usbhs->UEP_CTRL[ep_idx].RX_CTRL.res_mask = UEP_RES_NAK;
113
114 return RT_EOK;
115 }
116
udc_ep_enable(struct uendpoint * ep)117 static rt_err_t udc_ep_enable(struct uendpoint *ep)
118 {
119 volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;
120
121 uint8_t ep_idx, address, mod;
122
123 RT_ASSERT(ep != RT_NULL);
124 RT_ASSERT(ep->ep_desc != RT_NULL);
125
126 address = EP_ADDRESS(ep);
127 ep_idx = _get_ep_idx(address);
128
129 if (ep_idx > 0 && ep_idx <= UEP_ADDRESS_MAX)
130 {
131 mod = uep_dir_is_in(address) ? RB_UEP_TX_EN : RB_UEP_RX_EN;
132 mod = _uep_mod_get(usbhs, ep_idx) | mod;
133 _uep_mod_set(usbhs, ep_idx, mod);
134 }
135
136 return RT_EOK;
137 }
138
udc_ep_disable(struct uendpoint * ep)139 static rt_err_t udc_ep_disable(struct uendpoint *ep)
140 {
141 volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;
142
143 uint8_t ep_idx, address, mod;
144
145 RT_ASSERT(ep != RT_NULL);
146 RT_ASSERT(ep->ep_desc != RT_NULL);
147
148 address = EP_ADDRESS(ep);
149 ep_idx = _get_ep_idx(address);
150
151 if (ep_idx > 0 && ep_idx <= UEP_ADDRESS_MAX)
152 {
153 mod = uep_dir_is_in(address) ? RB_UEP_TX_EN : RB_UEP_RX_EN;
154 mod = _uep_mod_get(usbhs, ep_idx) & ~mod;
155 _uep_mod_set(usbhs, ep_idx, mod);
156 }
157
158 return RT_EOK;
159 }
160
/* Arm an OUT endpoint to ACK the next OUT packet into `buffer`.
 * Returns the (possibly clamped) number of bytes that may be received,
 * or 0 when `address` is an IN endpoint.
 * If `buffer` violates the DMA placement/alignment rules, DMA is
 * redirected to a static proxy buffer; udc_ep_read() copies back later.
 */
static rt_ssize_t udc_ep_read_prepare(uint8_t address, void *buffer, rt_size_t size)
{
    volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;

    uint8_t ep_idx = _get_ep_idx(address);

    uint32_t dmabuf = (uint32_t)buffer;

    /* reads are only valid on OUT endpoints */
    if (uep_dir_is_in(address))
        return 0;

    /* clamp to the endpoint's max packet size (EP0: 64, others: 512) */
    if (size > (ep_idx ? UEP_MPS_512 : UEP_MPS_64))
        size = (ep_idx ? UEP_MPS_512 : UEP_MPS_64);

    /* need extra `buffer` copy if H/W requirement not met
     * CH565/CH569 : DMA buffer resides in RAMX, 16-byte aligned
     * CH567/CH568 : 4-byte aligned
     */
#ifdef SOC_SERIES_CH569
    if (size > 0 && (dmabuf < RAMX_BASE_ADDRESS || (dmabuf & 0xf)))
    {
        /* fall back to the static proxy buffer in RAMX */
        dmabuf = (uint32_t)(ep_idx ? epx_dmabuf[ep_idx] : ep0_dmabuf);
    }
    /* Note : usbhs->UEP_RX_DMA[0] maps to usbhs->UEP0_RT_DMA actually */
    usbhs->UEP_RX_DMA[ep_idx] = dmabuf & UEP_RT_DMA_MASK;
#else
    if (size > 0 && (dmabuf & 3))
    {
        /* fall back to the 4-byte aligned static proxy buffer */
        dmabuf = (uint32_t)(ep_idx ? epx_dmabuf[ep_idx] : ep0_dmabuf);
    }
    usbhs->UEP_DMA[ep_idx] = dmabuf & UEP_RT_DMA_MASK;
#endif

    if (ep_idx == 0 && size == 0)
    {
        /* SETUP status stage, expect DATA1 */
        usbhs->UEP_CTRL[0].RX_CTRL.reg = RB_UEP_RES_ACK | RB_UEP_TOG_DATA1;
    }
    else
    {
        /* keep TOG_MASK & AUTOTOG */
        usbhs->UEP_CTRL[ep_idx].RX_CTRL.res_mask = UEP_RES_ACK;
    }

    return size;
}
207
/* Complete an OUT transfer : return the number of bytes received and,
 * if the DMA went into a proxy buffer (address mismatch with `buffer`),
 * copy the data back into the caller's buffer.
 * Returns 0 when `address` is an IN endpoint.
 */
static rt_ssize_t udc_ep_read(uint8_t address, void *buffer)
{
    volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;

    uint8_t ep_idx = _get_ep_idx(address);

    uint32_t dmabuf;
    rt_size_t size;

    if (uep_dir_is_in(address))
        return 0;

#ifdef SOC_SERIES_CH569
    /* Note : usbhs->UEP_RX_DMA[0] maps to usbhs->UEP0_RT_DMA actually */
    dmabuf = usbhs->UEP_RX_DMA[ep_idx] & UEP_RT_DMA_MASK;
#else
    dmabuf = usbhs->UEP_DMA[ep_idx] & UEP_RT_DMA_MASK;
#endif
    /* byte count of the most recent OUT packet */
    size = usbhs->RX_LEN;

    /* copy if proxy buffer */
    if (size > 0 && ((uint32_t)buffer & UEP_RT_DMA_MASK) != dmabuf)
    {
        /* DMA register holds only the offset; rebuild the full address */
        dmabuf |= RAMX_BASE_ADDRESS;
        rt_memcpy(buffer, (void *)dmabuf, size);
    }

    return size;
}
237
/* Queue `size` bytes from `buffer` for transmission on an IN endpoint
 * and arm the endpoint to ACK the next IN token.
 * Returns the (possibly clamped) number of bytes queued, or 0 when
 * `address` is an OUT endpoint. Data is copied into a static proxy
 * buffer when `buffer` violates the DMA placement/alignment rules.
 */
static rt_ssize_t udc_ep_write(uint8_t address, void *buffer, rt_size_t size)
{
    volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;

    uint8_t ep_idx = _get_ep_idx(address);

    uint32_t dmabuf = (uint32_t)buffer;

    union _uh_rt_ctrl ctrl;

    /* writes are only valid on IN endpoints */
    if (uep_dir_is_out(address))
        return 0;

    /* clamp to the endpoint's max packet size (EP0: 64, others: 512) */
    if (size > (ep_idx ? UEP_MPS_512 : UEP_MPS_64))
        size = (ep_idx ? UEP_MPS_512 : UEP_MPS_64);

    /* need extra `buffer` copy if H/W requirement not met
     * CH565/CH569 : DMA buffer resides in RAMX, 16-byte aligned
     * CH567/CH568 : 4-byte aligned
     */
#ifdef SOC_SERIES_CH569
    if (size > 0 && (dmabuf < RAMX_BASE_ADDRESS || (dmabuf & 0xf)))
    {
        dmabuf = (uint32_t)(ep_idx ? epx_dmabuf[ep_idx] : ep0_dmabuf);
        rt_memcpy((void *)dmabuf, buffer, size);
    }
    /* EP0 uses the shared UEP0_RT_DMA register, no entry in UEP_TX_DMA */
    if (ep_idx == 0)
        usbhs->UEP0_RT_DMA = dmabuf & UEP_RT_DMA_MASK;
    else
        usbhs->UEP_TX_DMA[ep_idx] = dmabuf & UEP_RT_DMA_MASK;
#else
    if (size > 0 && (dmabuf & 3))
    {
        dmabuf = (uint32_t)(ep_idx ? epx_dmabuf[ep_idx] : ep0_dmabuf);
        rt_memcpy((void *)dmabuf, buffer, size);
    }
    usbhs->UEP_DMA[ep_idx] = dmabuf & UEP_RT_DMA_MASK;
#endif
    usbhs->UEP_CTRL[ep_idx].t_len = size;

    /* keep TOG_MASK & AUTOTOG */
    usbhs->UEP_CTRL[ep_idx].TX_CTRL.res_mask = UEP_RES_ACK;

    return size;
}
283
/* Send the zero-length DATA1 packet that completes the status stage of
 * a control transfer on EP0. */
static rt_err_t udc_ep0_send_status(void)
{
    volatile struct usbhs_registers *usbhs = (void *)USBHS_REG_BASE;

    /* SETUP status stage : zero data length, always DATA1 */
    usbhs->UEP_CTRL[0].t_len = 0;
    /* This is the only case UEP0_RT_DMA is set to 0.
     * (_isr_handle_transfer tests UEP0_RT_DMA != 0 to distinguish a
     * status-stage completion from a data-stage IN completion.) */
    usbhs->UEP0_RT_DMA = 0;
    usbhs->UEP_CTRL[0].TX_CTRL.reg = RB_UEP_RES_ACK | RB_UEP_TOG_DATA1;
    return RT_EOK;
}
295
udc_suspend(void)296 static rt_err_t udc_suspend(void)
297 {
298 return RT_EOK;
299 }
300
udc_wakeup(void)301 static rt_err_t udc_wakeup(void)
302 {
303 return RT_EOK;
304 }
305
/* udcd callback table handed to the RT-Thread USB device stack */
static const struct udcd_ops udcd_ops =
{
    .set_address = udc_set_address,
    .set_config = udc_set_config,
    .ep_set_stall = udc_ep_set_stall,
    .ep_clear_stall = udc_ep_clear_stall,
    .ep_enable = udc_ep_enable,
    .ep_disable = udc_ep_disable,
    .ep_read_prepare = udc_ep_read_prepare,
    .ep_read = udc_ep_read,
    .ep_write = udc_ep_write,
    .ep0_send_status = udc_ep0_send_status,
    .suspend = udc_suspend,
    .wakeup = udc_wakeup,
};
321
/* Reset the controller into high-speed device mode : all endpoints
 * disabled, address 0, interrupts enabled for bus-reset / transfer /
 * FIFO-overflow / SETUP, EP0 primed to NAK until data is ready.
 * Also called from the ISR on bus reset.
 * NOTE(review): name presumably meant _usbhs_device_mode_init.
 */
static void _hsbhs_device_mode_init(volatile struct usbhs_registers *usbhs)
{
    uint8_t ep_idx;

    /* disable all endpoints, use single buffer mode (BUF_MOD : 0) */
    usbhs->UHOST_CTRL.reg = 0;
    usbhs->SUSPEND.reg = 0;
    usbhs->R32_UEP_MOD = 0;
    usbhs->DEV_AD = 0;

    /* pulse SIE reset + clear FIFOs, then configure device mode */
    usbhs->CTRL.reg = RB_USB_RESET_SIE | RB_USB_CLR_ALL;
    usbhs->CTRL.reg = RB_USB_DEVICE_MODE |
                      RB_SPTP_HIGH_SPEED |
                      RB_DEV_PU_EN |
                      RB_USB_INT_BUSY |
                      RB_USB_DMA_EN;

    usbhs->INT_EN.reg = RB_USB_IE_BUSRST |
                        RB_USB_IE_TRANS |
                        RB_USB_IE_FIFOOV |
                        RB_USB_IE_SETUPACT;

    usbhs->UEP_MAX_LEN[0].reg = UEP_MPS_64;
    /*
     * It seems EP0 SETUP uses the first 8 bytes of RAMX as dmabuf and
     * handles DATA0 transfer & ACK on its own. Here we still needs to
     * RES_NAK TX/RX to block SETUP data stage till dma data is ready.
     */
    usbhs->UEP_CTRL[0].TX_CTRL.reg = RB_UEP_RES_NAK | RB_UEP_TOG_DATA1;
    usbhs->UEP_CTRL[0].RX_CTRL.reg = RB_UEP_RES_NAK | RB_UEP_TOG_DATA1;

    for (ep_idx = 1; ep_idx <= UEP_ADDRESS_MAX; ep_idx++)
    {
        usbhs->UEP_MAX_LEN[ep_idx].reg = UEP_MPS_512;
        /* set to DATA0, remains to be initialized (SET_CONFIGURATION...) */
        usbhs->UEP_CTRL[ep_idx].TX_CTRL.reg = RB_UEP_RES_NAK | RB_UEP_AUTOTOG;
        usbhs->UEP_CTRL[ep_idx].RX_CTRL.reg = RB_UEP_RES_NAK | RB_UEP_AUTOTOG;
    }
}
361
udc_device_init(struct rt_device * device)362 static rt_err_t udc_device_init(struct rt_device *device)
363 {
364 volatile struct usbhs_registers *usbhs = device->user_data;
365
366 sys_clk_off_by_irqn(USBHS_IRQn, SYS_SLP_CLK_ON);
367
368 _hsbhs_device_mode_init(usbhs);
369
370 rt_hw_interrupt_umask(USBHS_IRQn);
371
372 return RT_EOK;
373 }
374
#ifdef RT_USING_DEVICE_OPS
/* ops table used instead of the legacy per-device init callback */
static struct rt_device_ops device_ops;
#endif
378
rt_hw_usbd_init(void)379 static int rt_hw_usbd_init(void)
380 {
381 int ret;
382
383 udc_device.parent.type = RT_Device_Class_USBDevice;
384 #ifdef RT_USING_DEVICE_OPS
385 device_ops.init = udc_device_init;
386 udc_device.parent.ops = &device_ops;
387 #else
388 udc_device.parent.init = udc_device_init;
389 #endif
390 udc_device.parent.user_data = (void *)USBHS_REG_BASE;
391 udc_device.ops = &udcd_ops;
392 udc_device.ep_pool = usbhs_ep_pool;
393 udc_device.ep0.id = &usbhs_ep_pool[0];
394 udc_device.device_is_hs = RT_TRUE;
395
396 ret = rt_device_register(&udc_device.parent, "usbd", 0);
397 if (ret == RT_EOK)
398 ret = rt_usb_device_init();
399
400 return ret;
401 }
402 INIT_DEVICE_EXPORT(rt_hw_usbd_init);
403
_uep_tog_datax(uint8_t tog)404 rt_inline uint8_t _uep_tog_datax(uint8_t tog)
405 {
406 /* Note: treat tog as RB_UEP_TOG_DATA0 if not RB_UEP_TOG_DATA1 */
407 return (tog == RB_UEP_TOG_DATA1) ? RB_UEP_TOG_DATA0 : RB_UEP_TOG_DATA1;
408 }
409
_isr_ep_stall(volatile struct usbhs_registers * usbhs)410 static void _isr_ep_stall(volatile struct usbhs_registers *usbhs)
411 {
412 uint8_t ep_idx = usbhs->INT_ST.dev_endp_mask;
413
414 usbhs->UEP_CTRL[ep_idx].TX_CTRL.res_mask == UEP_RES_STALL;
415 usbhs->UEP_CTRL[ep_idx].RX_CTRL.res_mask == UEP_RES_STALL;
416 }
417
/* SETUP-packet interrupt : copy the 8-byte request out of the hardware
 * SETUP dmabuf, re-initialize endpoint toggles for the requests that
 * require it, then forward the request to the USB device stack. */
static void _isr_handle_setup(volatile struct usbhs_registers *usbhs)
{
    struct urequest setup, *packet;

    uint8_t ep_idx, xctrl, recipient;

    /* RES_NAK to block data stage, will expect or response DATA1 */
    usbhs->UEP_CTRL[0].TX_CTRL.reg = RB_UEP_RES_NAK | RB_UEP_TOG_DATA1;
    usbhs->UEP_CTRL[0].RX_CTRL.reg = RB_UEP_RES_NAK | RB_UEP_TOG_DATA1;

    /* hardware DMA'd the SETUP packet to the start of the DMA region */
    packet = (struct urequest *)_ep0_setup_dmabuf;

    /* copy field by field : `packet` points at DMA memory that the next
     * SETUP packet would overwrite */
    setup.request_type = packet->request_type;
    setup.bRequest = packet->bRequest;
    setup.wValue = packet->wValue;
    setup.wIndex = packet->wIndex;
    setup.wLength = packet->wLength;

    /* Init data toggle bit. Not sure if it has been done by h/w.*/
    xctrl = RB_UEP_RES_NAK | RB_UEP_AUTOTOG | RB_UEP_TOG_DATA0;
    recipient = setup.request_type & USB_REQ_TYPE_RECIPIENT_MASK;
    if (recipient == USB_REQ_TYPE_DEVICE &&
        setup.bRequest == USB_REQ_SET_CONFIGURATION)
    {
        /* SET_CONFIGURATION resets toggles on every data endpoint */
        for (ep_idx = 1; ep_idx <= UEP_ADDRESS_MAX; ep_idx++)
        {
            usbhs->UEP_CTRL[ep_idx].TX_CTRL.reg = xctrl;
            usbhs->UEP_CTRL[ep_idx].RX_CTRL.reg = xctrl;
        }
    }
    else if (recipient == USB_REQ_TYPE_ENDPOINT &&
             setup.bRequest == USB_REQ_CLEAR_FEATURE &&
             setup.wValue == USB_EP_HALT)
    {
        /* CLEAR_FEATURE(ENDPOINT_HALT) resets that endpoint's toggles */
        ep_idx = setup.wIndex;
        if (ep_idx > 0 && ep_idx <= UEP_ADDRESS_MAX)
        {
            usbhs->UEP_CTRL[ep_idx].TX_CTRL.reg = xctrl;
            usbhs->UEP_CTRL[ep_idx].RX_CTRL.reg = xctrl;
        }
    }

    rt_usbd_ep0_setup_handler(&udc_device, &setup);
}
462
/* Transfer-complete interrupt : dispatch by endpoint index and token
 * type. Each path first re-arms the endpoint with RES_NAK (so the host
 * is held off until the stack queues more data), then notifies the
 * RT-Thread USB device stack. */
static void _isr_handle_transfer(volatile struct usbhs_registers *usbhs)
{
    rt_size_t size;

    uint8_t ep_idx, token, tog;

    ep_idx = usbhs->INT_ST.dev_endp_mask;
    token = usbhs->INT_ST.dev_token_mask;

    if (ep_idx == 0)
    {
        if (token == DEV_TOKEN_IN)
        {
            /* UEP0 does not support AUTOTOG, generate DATAx manually */
            tog = usbhs->UEP_CTRL[0].TX_CTRL.reg & RB_UEP_TOG_MASK;
            tog = _uep_tog_datax(tog);
            /* wait for udc_ep_write or udc_ep0_send_status to RES_ACK */
            usbhs->UEP_CTRL[0].TX_CTRL.reg = RB_UEP_RES_NAK | tog;

            /* zero-length IN completion while an address change is
             * pending => status stage of SET_ADDRESS just finished,
             * now it is safe to switch DEV_AD (bit7 is the flag) */
            if (setup_set_address != 0 && usbhs->UEP_CTRL[0].t_len == 0)
            {
                usbhs->DEV_AD = setup_set_address & 0x7f;
                setup_set_address = 0;
            }
            /* don't call in_handler if send_status */
            if (usbhs->UEP0_RT_DMA != 0)
            {
                rt_usbd_ep0_in_handler(&udc_device);
            }
        }
        else if (token == DEV_TOKEN_OUT)
        {
            if (usbhs->INT_ST.st_togok)
            {
                /* UEP0 does not support AUTOTOG, generate DATAx manually */
                tog = usbhs->UEP_CTRL[0].RX_CTRL.reg & RB_UEP_TOG_MASK;
                tog = _uep_tog_datax(tog);
                /* wait for udc_ep_read_prepare to RES_ACK */
                usbhs->UEP_CTRL[0].RX_CTRL.reg = RB_UEP_RES_NAK | tog;
                rt_usbd_ep0_out_handler(&udc_device, usbhs->RX_LEN);
            }
            else
            {
                /* Corrupted ACK Handshake => ignore data, keep sequence bit */
                usbhs->UEP_CTRL[0].RX_CTRL.res_mask = UEP_RES_NAK;
            }
        }
    }
    else if (token == DEV_TOKEN_IN)
    {
        /* wait for udc_ep_write to RES_ACK */
        usbhs->UEP_CTRL[ep_idx].TX_CTRL.res_mask = UEP_RES_NAK;
        size = usbhs->UEP_CTRL[ep_idx].t_len;
        rt_usbd_ep_in_handler(&udc_device, ep_idx | USB_DIR_IN, size);
    }
    else if (token == DEV_TOKEN_OUT)
    {
        /* wait for udc_ep_read_prepare to RES_ACK */
        usbhs->UEP_CTRL[ep_idx].RX_CTRL.res_mask = UEP_RES_NAK;
        /* ignore data if Corrupted ACK Handshake */
        if (usbhs->INT_ST.st_togok)
        {
            /* size:0 to trigger dcd_ep_read() in _data_notify() */
            rt_usbd_ep_out_handler(&udc_device, ep_idx | USB_DIR_OUT, 0);
        }
    }
}
530
531 /*
532 * CAVEAT: The usbd design of ch56x relies on instant isr to RES_NAK
533 * UEP_CTRL[n].TX_CTRL/RX_CTRL. A long tarried isr may leave RES_ACK
534 * in UEP_CTRL[n] and starts unintended DMA upon arrival of IN/OUT.
535 */
/* USBHS interrupt entry point (RISC-V interrupt attribute). Snapshots
 * INT_FG once, dispatches to the FIFO-overflow / transfer / SETUP /
 * bus-reset handlers, then writes the snapshot back to clear exactly
 * the flags that were handled. */
void usbhs_irq_handler(void) __attribute__((interrupt()));
void usbhs_irq_handler(void)
{
    volatile struct usbhs_registers *usbhs;
    union _usb_int_fg intflag;

    isr_sp_enter();
    rt_interrupt_enter();

    usbhs = (struct usbhs_registers *)USBHS_REG_BASE;
    /* snapshot the interrupt flags; cleared in one shot at the end */
    intflag.reg = usbhs->INT_FG.reg;

    if (intflag.fifoov)
    {
        /* FIXME: fifo overflow */
        _isr_ep_stall(usbhs);
    }
    else
    {
        if (intflag.transfer)
            _isr_handle_transfer(usbhs);

        if (intflag.setupact)
            _isr_handle_setup(usbhs);
    }

    if (intflag.busrst)
    {
        /* bus reset : reprogram the controller and notify the stack */
        _hsbhs_device_mode_init(usbhs);
        rt_usbd_reset_handler(&udc_device);
    }

    /* clear all pending intflag (suspend, isoact & nak ignored) */
    usbhs->INT_FG.reg = intflag.reg;

    rt_interrupt_leave();
    isr_sp_leave();
}
574