// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c
 * Designware DWC2 on-chip full/high speed USB OTG 2.0 device controllers
 *
 * Copyright (C) 2009 for Samsung Electronics
 *
 * BSP Support for Samsung's UDC driver
 * available at:
 * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git
 *
 * State machine bugfixes:
 * Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * Ported to u-boot:
 * Marek Szyprowski <m.szyprowski@samsung.com>
 * Lukasz Majewski <l.majewski@samsumg.com>
 */

#include <cpu_func.h>
#include <log.h>
#include <linux/bitfield.h>
#include <linux/bug.h>

static u8 clear_feature_num;
int clear_feature_flag;

/* Bulk-Only Mass Storage Reset (class-specific request) */
#define GET_MAX_LUN_REQUEST	0xFE
#define BOT_RESET_REQUEST	0xFF

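/*
 * Arm EP0 IN with a zero-length packet (status stage) and wait for its
 * completion interrupt.
 */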
static inline void dwc2_udc_ep0_zlp(struct dwc2_udc *dev)
{
	writel(phys_to_bus((unsigned long)usb_ctrl_dma_addr),
	       &reg->device_regs.in_endp[EP0_CON].diepdma);
	writel(FIELD_PREP(DXEPTSIZ_PKTCNT_MASK, 1), &reg->device_regs.in_endp[EP0_CON].dieptsiz);

	setbits_le32(&reg->device_regs.in_endp[EP0_CON].diepctl, DXEPCTL_EPENA | DXEPCTL_CNAK);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.in_endp[EP0_CON].diepctl));
	dev->ep0state = WAIT_FOR_IN_COMPLETE;
}

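/* Prepare EP0 OUT to receive the next SETUP packet into the control DMA buffer. */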
static void dwc2_udc_pre_setup(void)
{
	debug_cond(DEBUG_IN_EP,
		   "%s : Prepare Setup packets.\n", __func__);

	writel(FIELD_PREP(DXEPTSIZ_PKTCNT_MASK, 1) | sizeof(struct usb_ctrlrequest),
	       &reg->device_regs.out_endp[EP0_CON].doeptsiz);
	writel(phys_to_bus((unsigned long)usb_ctrl_dma_addr),
	       &reg->device_regs.out_endp[EP0_CON].doepdma);

	setbits_le32(&reg->device_regs.out_endp[EP0_CON].doepctl, DXEPCTL_EPENA);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.out_endp[EP0_CON].doepctl));
}

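/* Re-arm EP0 OUT for the zero-length status stage of a control IN transfer. */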
static inline void dwc2_ep0_complete_out(void)
{
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.out_endp[EP0_CON].doepctl));

	debug_cond(DEBUG_IN_EP,
		   "%s : Prepare Complete Out packet.\n", __func__);

	writel(FIELD_PREP(DXEPTSIZ_PKTCNT_MASK, 1) | sizeof(struct usb_ctrlrequest),
	       &reg->device_regs.out_endp[EP0_CON].doeptsiz);
	writel(phys_to_bus((unsigned long)usb_ctrl_dma_addr),
	       &reg->device_regs.out_endp[EP0_CON].doepdma);

	setbits_le32(&reg->device_regs.out_endp[EP0_CON].doepctl, DXEPCTL_EPENA | DXEPCTL_CNAK);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->device_regs.out_endp[EP0_CON].doepctl));
}

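/*
 * Program an OUT endpoint for DMA reception of the current request:
 * invalidate the destination buffer, set DOEPDMA/DOEPTSIZ and enable
 * the endpoint.
 */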
static int setdma_rx(struct dwc2_ep *ep, struct dwc2_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	length = min_t(u32, req->req.length - req->req.actual,
		       ep_num ? DMA_BUFFER_SIZE : ep->ep.maxpacket);

	ep->len = length;
	ep->dma_buf = buf;

	if (ep_num == EP0_CON || length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1) / (ep->ep.maxpacket) + 1;

	ctrl = readl(&reg->device_regs.out_endp[ep_num].doepctl);

	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(ep->len, CONFIG_SYS_CACHELINE_SIZE));

	writel(phys_to_bus((unsigned long)ep->dma_buf), &reg->device_regs.out_endp[ep_num].doepdma);
	writel(FIELD_PREP(DXEPTSIZ_PKTCNT_MASK, pktcnt) |
	       FIELD_PREP(DXEPTSIZ_XFERSIZE_MASK, length),
	       &reg->device_regs.out_endp[ep_num].doeptsiz);
	writel(DXEPCTL_EPENA | DXEPCTL_CNAK | ctrl, &reg->device_regs.out_endp[ep_num].doepctl);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
		   "DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->device_regs.out_endp[ep_num].doepdma),
		   readl(&reg->device_regs.out_endp[ep_num].doeptsiz),
		   readl(&reg->device_regs.out_endp[ep_num].doepctl),
		   buf, pktcnt, length);
	return 0;
}

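/*
 * Program an IN endpoint for DMA transmission of the current request:
 * flush the source buffer, set DIEPDMA/DIEPTSIZ, enable the endpoint
 * and return the number of bytes queued.
 */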
static int setdma_tx(struct dwc2_ep *ep, struct dwc2_request *req)
{
	u32 *buf;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	length = req->req.length - req->req.actual;

	if (ep_num == EP0_CON)
		length = min(length, (u32)ep_maxpacket(ep));

	ep->len = length;
	ep->dma_buf = buf;

	flush_dcache_range((unsigned long) ep->dma_buf,
			   (unsigned long) ep->dma_buf +
			   ROUND(ep->len, CONFIG_SYS_CACHELINE_SIZE));

	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1) / (ep->ep.maxpacket) + 1;

	/* Flush the endpoint's Tx FIFO */
	dwc2_flush_tx_fifo(reg, ep->fifo_num);

	writel(phys_to_bus((unsigned long)ep->dma_buf), &reg->device_regs.in_endp[ep_num].diepdma);
	writel(FIELD_PREP(DXEPTSIZ_PKTCNT_MASK, pktcnt) |
	       FIELD_PREP(DXEPTSIZ_XFERSIZE_MASK, length),
	       &reg->device_regs.in_endp[ep_num].dieptsiz);

	clrsetbits_le32(&reg->device_regs.in_endp[ep_num].diepctl,
			DXEPCTL_TXFNUM_MASK | DXEPCTL_NEXTEP_MASK,
			FIELD_PREP(DXEPCTL_TXFNUM_MASK, ep->fifo_num) |
			DXEPCTL_EPENA | DXEPCTL_CNAK);

	debug_cond(DEBUG_IN_EP,
		   "%s:EP%d TX DMA start : DIEPDMA0 = 0x%x,"
		   "DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->device_regs.in_endp[ep_num].diepdma),
		   readl(&reg->device_regs.in_endp[ep_num].dieptsiz),
		   readl(&reg->device_regs.in_endp[ep_num].diepctl),
		   buf, pktcnt, length);

	return length;
}

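/* Handle completion of an OUT (RX) DMA transfer on the given endpoint. */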
static void complete_rx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);
	ep_tsr = readl(&reg->device_regs.out_endp[ep_num].doeptsiz);

	if (ep_num == EP0_CON)
		xfer_size = FIELD_PREP(DIEPTSIZ0_XFERSIZE_MASK, ep_tsr);
	else
		xfer_size = FIELD_PREP(DXEPTSIZ_XFERSIZE_MASK, ep_tsr);

	xfer_size = ep->len - xfer_size;

	/*
	 * NOTE:
	 *
	 * Please be careful with buffer allocation for the USB request:
	 * not only must the start address be aligned to
	 * CONFIG_SYS_CACHELINE_SIZE, the buffer size must also be a
	 * multiple of the cache line size.
	 *
	 * This prevents corruption of data allocated immediately before
	 * or after the buffer.
	 *
	 * For armv7, cache_v7.c emits an "ERROR" message to warn users.
	 */
	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(xfer_size, CONFIG_SYS_CACHELINE_SIZE));

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = !!(xfer_size % ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, req->req.length - req->req.actual);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, " => Send ZLP\n");
			dwc2_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						 struct dwc2_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else {
		setdma_rx(ep, req);
	}
}

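/* Handle completion of an IN (TX) DMA transfer on the given endpoint. */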
static void complete_tx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 last;

	if (dev->ep0state == WAIT_FOR_NULL_COMPLETE) {
		dev->ep0state = WAIT_FOR_OUT_COMPLETE;
		dwc2_ep0_complete_out();
		return;
	}

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_IN_EP,
			   "%s: TX DMA done : NULL REQ on IN EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);

	ep_tsr = readl(&reg->device_regs.in_endp[ep_num].dieptsiz);

	xfer_size = ep->len;
	is_short = (xfer_size < ep->ep.maxpacket);
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);

	debug_cond(DEBUG_IN_EP,
		   "%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
		   "is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, req->req.length - req->req.actual);

	if (ep_num == 0) {
		if (dev->ep0state == DATA_STATE_XMIT) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, ep0state == "
				   "DATA_STATE_XMIT\n",
				   __func__, ep_num);
			last = write_fifo_ep0(ep, req);
			if (last)
				dev->ep0state = WAIT_FOR_COMPLETE;
		} else if (dev->ep0state == WAIT_FOR_IN_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_SETUP;
		} else if (dev->ep0state == WAIT_FOR_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_OUT_COMPLETE;
			dwc2_ep0_complete_out();
		} else {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, invalid ep state\n",
				   __func__, ep_num);
		}
		return;
	}

	if (req->req.actual == req->req.length)
		done(ep, req, 0);

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct dwc2_request, queue);
		debug_cond(DEBUG_IN_EP,
			   "%s: Next Tx request start...\n", __func__);
		setdma_tx(ep, req);
	}
}

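/*
 * Restart the first queued request on an endpoint, used to resume a
 * transfer after ClearFeature(HALT) in DMA mode.
 */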
static inline void dwc2_udc_check_tx_queue(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req;

	debug_cond(DEBUG_IN_EP,
		   "%s: Check queue, ep_num = %d\n", __func__, ep_num);

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct dwc2_request, queue);
		debug_cond(DEBUG_IN_EP,
			   "%s: Next Tx request(0x%p) start...\n",
			   __func__, req);

		if (ep_is_in(ep))
			setdma_tx(ep, req);
		else
			setdma_rx(ep, req);
	} else {
		debug_cond(DEBUG_IN_EP,
			   "%s: NULL REQ on IN EP-%d\n", __func__, ep_num);
	}
}

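/* Dispatch IN endpoint interrupts reported in DAINT. */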
static void process_ep_in_intr(struct dwc2_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->device_regs.daint);
	debug_cond(DEBUG_IN_EP,
		   "*** %s: EP In interrupt : DAINT = 0x%x\n", __func__, ep_intr);

	ep_intr = FIELD_GET(DAINT_INEP_MASK, ep_intr);

	while (ep_intr) {
		if (ep_intr & BIT(EP0_CON)) {
			ep_intr_status = readl(&reg->device_regs.in_endp[ep_num].diepint);
			debug_cond(DEBUG_IN_EP,
				   "\tEP%d-IN : DIEPINT = 0x%x\n",
				   ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->device_regs.in_endp[ep_num].diepint);

			if (ep_intr_status & DIEPMSK_XFERCOMPLMSK) {
				complete_tx(dev, ep_num);

				if (ep_num == 0) {
					if (dev->ep0state ==
					    WAIT_FOR_IN_COMPLETE)
						dev->ep0state = WAIT_FOR_SETUP;

					if (dev->ep0state == WAIT_FOR_SETUP)
						dwc2_udc_pre_setup();

					/* continue transfer after
					   set_clear_halt for DMA mode */
					if (clear_feature_flag == 1) {
						dwc2_udc_check_tx_queue(dev,
							clear_feature_num);
						clear_feature_flag = 0;
					}
				}
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}

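/* Dispatch OUT endpoint interrupts reported in DAINT. */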
static void process_ep_out_intr(struct dwc2_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;
	u32 ep_tsr = 0, xfer_size = 0;
	u32 epsiz_reg = reg->device_regs.out_endp[ep_num].doeptsiz;
	u32 req_size = sizeof(struct usb_ctrlrequest);

	ep_intr = readl(&reg->device_regs.daint);
	debug_cond(DEBUG_OUT_EP != 0,
		   "*** %s: EP OUT interrupt : DAINT = 0x%x\n",
		   __func__, ep_intr);

	ep_intr = FIELD_GET(DAINT_OUTEP_MASK, ep_intr);

	while (ep_intr) {
		if (ep_intr & BIT(EP0_CON)) {
			ep_intr_status = readl(&reg->device_regs.out_endp[ep_num].doepint);
			debug_cond(DEBUG_OUT_EP != 0,
				   "\tEP%d-OUT : DOEPINT = 0x%x\n",
				   ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->device_regs.out_endp[ep_num].doepint);

			if (ep_num == 0) {
				if (ep_intr_status & DOEPMSK_XFERCOMPLMSK) {
					ep_tsr = readl(&epsiz_reg);
					xfer_size = ep_tsr & DOEPTSIZ0_XFERSIZE_MASK;

					if (xfer_size == req_size &&
					    dev->ep0state == WAIT_FOR_SETUP) {
						dwc2_udc_pre_setup();
					} else if (dev->ep0state !=
						   WAIT_FOR_OUT_COMPLETE) {
						complete_rx(dev, ep_num);
					} else {
						dev->ep0state = WAIT_FOR_SETUP;
						dwc2_udc_pre_setup();
					}
				}

				if (ep_intr_status & DOEPMSK_SETUPMSK) {
					debug_cond(DEBUG_OUT_EP != 0,
						   "SETUP packet arrived\n");
					dwc2_handle_ep0(dev);
				}
			} else {
				if (ep_intr_status & DOEPMSK_XFERCOMPLMSK)
					complete_rx(dev, ep_num);
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}

/*
 * usb client interrupt handler.
 */
static int dwc2_udc_irq(int irq, void *_dev)
{
	struct dwc2_udc *dev = _dev;
	u32 intr_status, gotgint;
	u32 usb_status, gintmsk;
	unsigned long flags = 0;

	spin_lock_irqsave(&dev->lock, flags);

	intr_status = readl(&reg->global_regs.gintsts);
	gintmsk = readl(&reg->global_regs.gintmsk);

	debug_cond(DEBUG_ISR,
		   "\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x,"
		   "DAINT : 0x%x, DAINTMSK : 0x%x\n",
		   __func__, intr_status, state_names[dev->ep0state], gintmsk,
		   readl(&reg->device_regs.daint), readl(&reg->device_regs.daintmsk));

	if (!intr_status) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}

	if (intr_status & GINTSTS_ENUMDONE) {
		debug_cond(DEBUG_ISR, "\tSpeed Detection interrupt\n");

		writel(GINTSTS_ENUMDONE, &reg->global_regs.gintsts);
		usb_status = FIELD_GET(DSTS_ENUMSPD_MASK, readl(&reg->device_regs.dsts));

		if (usb_status != DSTS_ENUMSPD_HS) {
			debug_cond(DEBUG_ISR,
				   "\t\tFull Speed Detection\n");
			set_max_pktsize(dev, USB_SPEED_FULL);
		} else {
			debug_cond(DEBUG_ISR,
				   "\t\tHigh Speed Detection : 0x%x\n",
				   usb_status);
			set_max_pktsize(dev, USB_SPEED_HIGH);
		}
	}

	if (intr_status & GINTSTS_ERLYSUSP) {
		debug_cond(DEBUG_ISR, "\tEarly suspend interrupt\n");
		writel(GINTSTS_ERLYSUSP, &reg->global_regs.gintsts);
	}

	if (intr_status & GINTSTS_USBSUSP) {
		usb_status = readl(&reg->device_regs.dsts);
		debug_cond(DEBUG_ISR,
			   "\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
		writel(GINTSTS_USBSUSP, &reg->global_regs.gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
		}
	}

	if (intr_status & GINTSTS_OTGINT) {
		gotgint = readl(&reg->global_regs.gotgint);
		debug_cond(DEBUG_ISR,
			   "\tOTG interrupt: (GOTGINT):0x%x\n", gotgint);

		if (gotgint & GOTGINT_SES_END_DET) {
			debug_cond(DEBUG_ISR, "\t\tSession End Detected\n");
			/* Let gadget detect disconnected state */
			if (dev->driver->disconnect) {
				spin_unlock_irqrestore(&dev->lock, flags);
				dev->driver->disconnect(&dev->gadget);
				spin_lock_irqsave(&dev->lock, flags);
			}
		}
		writel(gotgint, &reg->global_regs.gotgint);
	}

	if (intr_status & GINTSTS_WKUPINT) {
		debug_cond(DEBUG_ISR, "\tResume interrupt\n");
		writel(GINTSTS_WKUPINT, &reg->global_regs.gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver
		    && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
		}
	}

	if (intr_status & GINTSTS_USBRST) {
		usb_status = readl(&reg->global_regs.gotgctl);
		debug_cond(DEBUG_ISR,
			   "\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
		writel(GINTSTS_USBRST, &reg->global_regs.gintsts);

		if (usb_status & (GOTGCTL_ASESVLD | GOTGCTL_BSESVLD)) {
			if (reset_available) {
				debug_cond(DEBUG_ISR,
					   "\t\tOTG core got reset (%d)!!\n",
					   reset_available);
				reconfig_usbd(dev);
				dev->ep0state = WAIT_FOR_SETUP;
				reset_available = 0;
				dwc2_udc_pre_setup();
			} else {
				reset_available = 1;
			}
		} else {
			reset_available = 1;
			debug_cond(DEBUG_ISR,
				   "\t\tRESET handling skipped\n");
		}
	}

	if (intr_status & GINTSTS_IEPINT)
		process_ep_in_intr(dev);

	if (intr_status & GINTSTS_OEPINT)
		process_ep_out_intr(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}

/** Queue one request
 * Kickstart transfer if needed
 */
static int dwc2_queue(struct usb_ep *_ep, struct usb_request *_req,
		      gfp_t gfp_flags)
{
	struct dwc2_request *req;
	struct dwc2_ep *ep;
	struct dwc2_udc *dev;
	unsigned long flags = 0;
	u32 ep_num, gintsts;

	req = container_of(_req, struct dwc2_request, req);
	if (unlikely(!_req || !_req->complete || !_req->buf
		     || !list_empty(&req->queue))) {
		debug("%s: bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct dwc2_ep, ep);

	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		debug("%s: bad ep: %s, %d, %p\n", __func__,
		      ep->ep.name, !ep->desc, _ep);
		return -EINVAL;
	}

	ep_num = ep_index(ep);
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		debug("%s: bogus device state %p\n", __func__, dev->driver);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	debug("\n*** %s: %s-%s req = %p, len = %d, buf = %p"
	      "Q empty = %d, stopped = %d\n",
	      __func__, _ep->name, ep_is_in(ep) ? "in" : "out",
	      _req, _req->length, _req->buf,
	      list_empty(&ep->queue), ep->stopped);

#ifdef DEBUG
	{
		int i, len = _req->length;

		printf("pkt = ");
		if (len > 64)
			len = 64;
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)_req->buf)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep_num == 0) {
			/* EP0 */
			list_add_tail(&req->queue, &ep->queue);
			dwc2_ep0_kick(dev, ep);
			req = 0;
		} else if (ep_is_in(ep)) {
			gintsts = readl(&reg->global_regs.gintsts);
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_is_in, DWC2_UDC_OTG_GINTSTS=0x%x\n",
				   __func__, gintsts);

			setdma_tx(ep, req);
		} else {
			gintsts = readl(&reg->global_regs.gintsts);
			debug_cond(DEBUG_OUT_EP != 0,
				   "%s:ep_is_out, DWC2_UDC_OTG_GINTSTS=0x%x\n",
				   __func__, gintsts);

			setdma_rx(ep, req);
		}
	}

	/* pio or dma irq handler advances the queue. */
	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/****************************************************************/
/* End Point 0 related functions                                */
/****************************************************************/

/* return: 0 = still running, 1 = completed, negative = errno */
static int write_fifo_ep0(struct dwc2_ep *ep, struct dwc2_request *req)
{
	u32 max;
	unsigned count;
	int is_last;

	max = ep_maxpacket(ep);

	debug_cond(DEBUG_EP0 != 0, "%s: max = %d\n", __func__, max);

	count = setdma_tx(ep, req);

	/* last packet is usually short (or a zlp) */
	if (likely(count != max))
		is_last = 1;
	else {
		if (likely(req->req.length != req->req.actual + count)
		    || req->req.zero)
			is_last = 0;
		else
			is_last = 1;
	}

	debug_cond(DEBUG_EP0 != 0,
		   "%s: wrote %s %d bytes%s %d left %p\n", __func__,
		   ep->ep.name, count,
		   is_last ? "/L" : "",
		   req->req.length - req->req.actual - count, req);

	/* requests complete when all IN data is in the FIFO */
	if (is_last) {
		ep->dev->ep0state = WAIT_FOR_SETUP;
		return 1;
	}

	return 0;
}

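/*
 * With DMA the SETUP bytes are already in the control buffer; just
 * invalidate its cache lines before the caller parses them.
 */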
static int dwc2_fifo_read(struct dwc2_ep *ep, void *cp, int max)
{
	invalidate_dcache_range((unsigned long)cp, (unsigned long)cp +
				ROUND(max, CONFIG_SYS_CACHELINE_SIZE));

	debug_cond(DEBUG_EP0 != 0,
		   "%s: bytes=%d, ep_index=%d 0x%p\n", __func__,
		   max, ep_index(ep), cp);

	return max;
}

/**
 * udc_set_address - set the USB address for this device
 * @address:
 *
 * Called from control endpoint function
 * after it decodes a set address setup packet.
 */
static void udc_set_address(struct dwc2_udc *dev, unsigned char address)
{
	setbits_le32(&reg->device_regs.dcfg, FIELD_PREP(DCFG_DEVADDR_MASK, address));

	dwc2_udc_ep0_zlp(dev);

	debug_cond(DEBUG_EP0 != 0,
		   "%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
		   __func__, address, readl(&reg->device_regs.dcfg));

	dev->usb_address = address;
}

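/* Stall EP0 IN and re-arm reception of the next SETUP packet. */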
static inline void dwc2_udc_ep0_set_stall(struct dwc2_ep *ep)
{
	struct dwc2_udc *dev;
	u32 ep_ctrl = 0;

	dev = ep->dev;
	ep_ctrl = readl(&reg->device_regs.in_endp[EP0_CON].diepctl);

	/* set the disable and stall bits */
	if (ep_ctrl & DXEPCTL_EPENA)
		ep_ctrl |= DXEPCTL_EPDIS;

	ep_ctrl |= DXEPCTL_STALL;

	writel(ep_ctrl, &reg->device_regs.in_endp[EP0_CON].diepctl);

	debug_cond(DEBUG_EP0 != 0,
		   "%s: set ep%d stall, DIEPCTL0 = 0x%p\n",
		   __func__, ep_index(ep), &reg->device_regs.in_endp[EP0_CON].diepctl);
	/*
	 * The application can only set this bit, and the core clears it,
	 * when a SETUP token is received for this endpoint
	 */
	dev->ep0state = WAIT_FOR_SETUP;

	dwc2_udc_pre_setup();
}

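/* Start the OUT data stage (or a status ZLP) for the current EP0 request. */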
static void dwc2_ep0_read(struct dwc2_udc *dev)
{
	struct dwc2_request *req;
	struct dwc2_ep *ep = &dev->ep[0];

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct dwc2_request, queue);
	} else {
		debug("%s: ---> BUG\n", __func__);
		BUG();
		return;
	}

	debug_cond(DEBUG_EP0 != 0,
		   "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
		   __func__, req, req->req.length, req->req.actual);

	if (req->req.length == 0) {
		/* zlp for Set_configuration, Set_interface,
		 * or Bulk-Only mass storage reset */

		ep->len = 0;
		dwc2_udc_ep0_zlp(dev);

		debug_cond(DEBUG_EP0 != 0,
			   "%s: req.length = 0, bRequest = %d\n",
			   __func__, usb_ctrl->bRequest);
		return;
	}

	setdma_rx(ep, req);
}

/*
 * DATA_STATE_XMIT
 */
static int dwc2_ep0_write(struct dwc2_udc *dev)
{
	struct dwc2_request *req;
	struct dwc2_ep *ep = &dev->ep[0];
	int ret, need_zlp = 0;

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next, struct dwc2_request, queue);

	if (!req) {
		debug_cond(DEBUG_EP0 != 0, "%s: NULL REQ\n", __func__);
		return 0;
	}

	debug_cond(DEBUG_EP0 != 0,
		   "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
		   __func__, req, req->req.length, req->req.actual);

	if (req->req.length - req->req.actual == ep0_fifo_size) {
		/* Next write will end with the packet size, */
		/* so we need Zero-length-packet */
		need_zlp = 1;
	}

	ret = write_fifo_ep0(ep, req);

	if ((ret == 1) && !need_zlp) {
		/* Last packet */
		dev->ep0state = WAIT_FOR_COMPLETE;
		debug_cond(DEBUG_EP0 != 0,
			   "%s: finished, waiting for status\n", __func__);
	} else {
		dev->ep0state = DATA_STATE_XMIT;
		debug_cond(DEBUG_EP0 != 0,
			   "%s: not finished\n", __func__);
	}

	return 1;
}

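/*
 * Answer GET_STATUS on EP0 by DMA-ing the two status bytes from the
 * control buffer; returns non-zero if the request cannot be handled.
 */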
static int dwc2_udc_get_status(struct dwc2_udc *dev,
			       struct usb_ctrlrequest *crq)
{
	u8 ep_num = crq->wIndex & 0x3;
	u16 g_status = 0;

	debug_cond(DEBUG_SETUP != 0,
		   "%s: *** USB_REQ_GET_STATUS\n", __func__);
	printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK);
	switch (crq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		g_status = 0;
		debug_cond(DEBUG_SETUP != 0,
			   "\tGET_STATUS: USB_RECIP_INTERFACE, g_status = %d\n",
			   g_status);
		break;

	case USB_RECIP_DEVICE:
		g_status = 0x1; /* Self powered */
		debug_cond(DEBUG_SETUP != 0,
			   "\tGET_STATUS: USB_RECIP_DEVICE, g_status = %d\n",
			   g_status);
		break;

	case USB_RECIP_ENDPOINT:
		if (crq->wLength > 2) {
			debug_cond(DEBUG_SETUP != 0,
				   "\tGET_STATUS: unsupported EP or wLength\n");
			return 1;
		}

		g_status = dev->ep[ep_num].stopped;
		debug_cond(DEBUG_SETUP != 0,
			   "\tGET_STATUS: USB_RECIP_ENDPOINT, g_status = %d\n",
			   g_status);
		break;

	default:
		return 1;
	}

	memcpy(usb_ctrl, &g_status, sizeof(g_status));

	flush_dcache_range((unsigned long) usb_ctrl,
			   (unsigned long) usb_ctrl +
			   ROUND(sizeof(g_status), CONFIG_SYS_CACHELINE_SIZE));

	writel(phys_to_bus(usb_ctrl_dma_addr), &reg->device_regs.in_endp[EP0_CON].diepdma);
	writel(FIELD_PREP(DXEPTSIZ_PKTCNT_MASK, 1) | FIELD_PREP(DXEPTSIZ_XFERSIZE_MASK, 2),
	       &reg->device_regs.in_endp[EP0_CON].dieptsiz);

	setbits_le32(&reg->device_regs.in_endp[EP0_CON].diepctl, DXEPCTL_EPENA | DXEPCTL_CNAK);
	dev->ep0state = WAIT_FOR_NULL_COMPLETE;

	return 0;
}

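/* Set the NAK bit on the given endpoint (IN or OUT direction). */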
static void dwc2_udc_set_nak(struct dwc2_ep *ep)
{
	u8 ep_num;

	ep_num = ep_index(ep);
	debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);

	if (ep_is_in(ep)) {
		setbits_le32(&reg->device_regs.in_endp[ep_num].diepctl, DXEPCTL_SNAK);
		debug("%s: set NAK, DIEPCTL%d = 0x%x\n",
		      __func__, ep_num, readl(&reg->device_regs.in_endp[ep_num].diepctl));
	} else {
		setbits_le32(&reg->device_regs.out_endp[ep_num].doepctl, DXEPCTL_SNAK);
		debug("%s: set NAK, DOEPCTL%d = 0x%x\n",
		      __func__, ep_num, readl(&reg->device_regs.out_endp[ep_num].doepctl));
	}
}

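/* Stall the given endpoint; an enabled IN endpoint is disabled as well. */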
static void dwc2_udc_ep_set_stall(struct dwc2_ep *ep)
{
	u8 ep_num;
	u32 ep_ctrl = 0;

	ep_num = ep_index(ep);
	debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);

	if (ep_is_in(ep)) {
		ep_ctrl = readl(&reg->device_regs.in_endp[ep_num].diepctl);

		/* set the disable and stall bits */
		if (ep_ctrl & DXEPCTL_EPENA)
			ep_ctrl |= DXEPCTL_EPDIS;

		ep_ctrl |= DXEPCTL_STALL;

		writel(ep_ctrl, &reg->device_regs.in_endp[ep_num].diepctl);
		debug("%s: set stall, DIEPCTL%d = 0x%x\n",
		      __func__, ep_num, readl(&reg->device_regs.in_endp[ep_num].diepctl));
	} else {
		/* set the stall bit */
		setbits_le32(&reg->device_regs.out_endp[ep_num].doepctl, DXEPCTL_STALL);
		debug("%s: set stall, DOEPCTL%d = 0x%x\n",
		      __func__, ep_num, readl(&reg->device_regs.out_endp[ep_num].doepctl));
	}
}

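/*
 * Clear a stall condition and reset the data toggle to DATA0 for
 * interrupt/bulk endpoints.
 */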
static void dwc2_udc_ep_clear_stall(struct dwc2_ep *ep)
{
	u8 ep_num;
	u32 ep_ctrl = 0;

	ep_num = ep_index(ep);
	debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);

	if (ep_is_in(ep)) {
		ep_ctrl = readl(&reg->device_regs.in_endp[ep_num].diepctl);

		/* clear stall bit */
		ep_ctrl &= ~DXEPCTL_STALL;

		/*
		 * USB Spec 9.4.5: For endpoints using data toggle, regardless
		 * of whether an endpoint has the Halt feature set, a
		 * ClearFeature(ENDPOINT_HALT) request always results in the
		 * data toggle being reinitialized to DATA0.
		 */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
		    || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
			ep_ctrl |= DXEPCTL_SETD0PID; /* DATA0 */
		}

		writel(ep_ctrl, &reg->device_regs.in_endp[ep_num].diepctl);
		debug("%s: cleared stall, DIEPCTL%d = 0x%x\n",
		      __func__, ep_num, readl(&reg->device_regs.in_endp[ep_num].diepctl));
	} else {
		ep_ctrl = readl(&reg->device_regs.out_endp[ep_num].doepctl);

		/* clear stall bit */
		ep_ctrl &= ~DXEPCTL_STALL;

		if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
		    || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
			ep_ctrl |= DXEPCTL_SETD0PID; /* DATA0 */
		}

		writel(ep_ctrl, &reg->device_regs.out_endp[ep_num].doepctl);
		debug("%s: cleared stall, DOEPCTL%d = 0x%x\n",
		      __func__, ep_num, readl(&reg->device_regs.out_endp[ep_num].doepctl));
	}
}

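/* Halt (stall) or resume the given non-control endpoint on behalf of the gadget. */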
static int dwc2_udc_set_halt(struct usb_ep *_ep, int value)
{
	struct dwc2_ep *ep;
	struct dwc2_udc *dev;
	unsigned long flags = 0;
	u8 ep_num;

	ep = container_of(_ep, struct dwc2_ep, ep);
	ep_num = ep_index(ep);

	if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON ||
		     ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) {
		debug("%s: %s bad ep or descriptor\n", __func__, ep->ep.name);
		return -EINVAL;
	}

	/* Attempt to halt IN ep will fail if any transfer requests
	 * are still queued */
	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
		debug("%s: %s queue not empty, req = %p\n",
		      __func__, ep->ep.name,
		      list_entry(ep->queue.next, struct dwc2_request, queue));

		return -EAGAIN;
	}

	dev = ep->dev;
	debug("%s: ep_num = %d, value = %d\n", __func__, ep_num, value);

	spin_lock_irqsave(&dev->lock, flags);

	if (value == 0) {
		ep->stopped = 0;
		dwc2_udc_ep_clear_stall(ep);
	} else {
		if (ep_num == 0)
			dev->ep0state = WAIT_FOR_SETUP;

		ep->stopped = 1;
		dwc2_udc_ep_set_stall(ep);
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

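/*
 * Activate an endpoint: program its type and max packet size in DxEPCTL
 * and unmask its interrupt in DAINTMSK.
 */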
static void dwc2_udc_ep_activate(struct dwc2_ep *ep)
{
	u8 ep_num;
	u32 ep_ctrl = 0, daintmsk = 0;

	ep_num = ep_index(ep);

	/* Read DEPCTLn register */
	if (ep_is_in(ep)) {
		ep_ctrl = readl(&reg->device_regs.in_endp[ep_num].diepctl);
		daintmsk = FIELD_PREP(DAINT_INEP_MASK, BIT(ep_num));
	} else {
		ep_ctrl = readl(&reg->device_regs.out_endp[ep_num].doepctl);
		daintmsk = FIELD_PREP(DAINT_OUTEP_MASK, BIT(ep_num));
	}

	debug("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n",
	      __func__, ep_num, ep_ctrl, ep_is_in(ep));

	/* If the EP is already active don't change the EP Control
	 * register. */
	if (!(ep_ctrl & DXEPCTL_USBACTEP)) {
		ep_ctrl = (ep_ctrl & ~DXEPCTL_EPTYPE_MASK) |
			  FIELD_PREP(DXEPCTL_EPTYPE_MASK, ep->bmAttributes);
		ep_ctrl = (ep_ctrl & ~DXEPCTL_MPS_MASK) |
			  FIELD_PREP(DXEPCTL_MPS_MASK, ep->ep.maxpacket);
		ep_ctrl |= (DXEPCTL_SETD0PID | DXEPCTL_USBACTEP | DXEPCTL_SNAK);

		if (ep_is_in(ep)) {
			writel(ep_ctrl, &reg->device_regs.in_endp[ep_num].diepctl);
			debug("%s: USB Active EP%d, DIEPCTRL%d = 0x%x\n",
			      __func__, ep_num, ep_num,
			      readl(&reg->device_regs.in_endp[ep_num].diepctl));
		} else {
			writel(ep_ctrl, &reg->device_regs.out_endp[ep_num].doepctl);
			debug("%s: USB Active EP%d, DOEPCTRL%d = 0x%x\n",
			      __func__, ep_num, ep_num,
			      readl(&reg->device_regs.out_endp[ep_num].doepctl));
		}
	}

	/* Unmask EP Interrupt */
	setbits_le32(&reg->device_regs.daintmsk, daintmsk);
	debug("%s: DAINTMSK = 0x%x\n", __func__, readl(&reg->device_regs.daintmsk));
}

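/* Handle a CLEAR_FEATURE control request (device or endpoint recipient). */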
static int dwc2_udc_clear_feature(struct usb_ep *_ep)
{
	struct dwc2_udc *dev;
	struct dwc2_ep *ep;
	u8 ep_num;

	ep = container_of(_ep, struct dwc2_ep, ep);
	ep_num = ep_index(ep);

	dev = ep->dev;
	debug_cond(DEBUG_SETUP != 0,
		   "%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n",
		   __func__, ep_num, ep_is_in(ep), clear_feature_flag);

	if (usb_ctrl->wLength != 0) {
		debug_cond(DEBUG_SETUP != 0,
			   "\tCLEAR_FEATURE: wLength is not zero.....\n");
		return 1;
	}

	switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (usb_ctrl->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			debug_cond(DEBUG_SETUP != 0,
				   "\tOFF:USB_DEVICE_REMOTE_WAKEUP\n");
			break;

		case USB_DEVICE_TEST_MODE:
			debug_cond(DEBUG_SETUP != 0,
				   "\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n");
			/** @todo Add CLEAR_FEATURE for TEST modes. */
			break;
		}

		dwc2_udc_ep0_zlp(dev);
		break;

	case USB_RECIP_ENDPOINT:
		debug_cond(DEBUG_SETUP != 0,
			   "\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n",
			   usb_ctrl->wValue);

		if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
			if (ep_num == 0) {
				dwc2_udc_ep0_set_stall(ep);
				return 0;
			}

			dwc2_udc_ep0_zlp(dev);

			dwc2_udc_ep_clear_stall(ep);
			dwc2_udc_ep_activate(ep);
			ep->stopped = 0;

			clear_feature_num = ep_num;
			clear_feature_flag = 1;
		}
		break;
	}

	return 0;
}

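/* Handle a SET_FEATURE control request (device, interface or endpoint recipient). */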
static int dwc2_udc_set_feature(struct usb_ep *_ep)
{
	struct dwc2_udc *dev;
	struct dwc2_ep *ep;
	u8 ep_num;

	ep = container_of(_ep, struct dwc2_ep, ep);
	ep_num = ep_index(ep);
	dev = ep->dev;

	debug_cond(DEBUG_SETUP != 0,
		   "%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n",
		   __func__, ep_num);

	if (usb_ctrl->wLength != 0) {
		debug_cond(DEBUG_SETUP != 0,
			   "\tSET_FEATURE: wLength is not zero.....\n");
		return 1;
	}

	switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (usb_ctrl->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			debug_cond(DEBUG_SETUP != 0,
				   "\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n");
			break;
		case USB_DEVICE_B_HNP_ENABLE:
			debug_cond(DEBUG_SETUP != 0,
				   "\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
			break;

		case USB_DEVICE_A_HNP_SUPPORT:
			/* RH port supports HNP */
			debug_cond(DEBUG_SETUP != 0,
				   "\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n");
			break;

		case USB_DEVICE_A_ALT_HNP_SUPPORT:
			/* other RH port does */
			debug_cond(DEBUG_SETUP != 0,
				   "\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
			break;
		}

		dwc2_udc_ep0_zlp(dev);
		return 0;

	case USB_RECIP_INTERFACE:
		debug_cond(DEBUG_SETUP != 0,
			   "\tSET_FEATURE: USB_RECIP_INTERFACE\n");
		break;

	case USB_RECIP_ENDPOINT:
		debug_cond(DEBUG_SETUP != 0,
			   "\tSET_FEATURE: USB_RECIP_ENDPOINT\n");
		if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
			if (ep_num == 0) {
				dwc2_udc_ep0_set_stall(ep);
				return 0;
			}
			ep->stopped = 1;
			dwc2_udc_ep_set_stall(ep);
		}

		dwc2_udc_ep0_zlp(dev);
		return 0;
	}

	return 1;
}

/*
 * WAIT_FOR_SETUP (OUT_PKT_RDY)
 */
static void dwc2_ep0_setup(struct dwc2_udc *dev)
{
	struct dwc2_ep *ep = &dev->ep[0];
	int i;
	u8 ep_num;

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);

	/* read control req from fifo (8 bytes) */
	dwc2_fifo_read(ep, usb_ctrl, 8);

	debug_cond(DEBUG_SETUP != 0,
		   "%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
		   "\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
		   __func__, usb_ctrl->bRequestType,
		   (usb_ctrl->bRequestType & USB_DIR_IN) ? "IN" : "OUT",
		   usb_ctrl->bRequest,
		   usb_ctrl->wLength, usb_ctrl->wValue, usb_ctrl->wIndex);

#ifdef DEBUG
	{
		int i, len = sizeof(*usb_ctrl);
		char *p = (char *)usb_ctrl;

		printf("pkt = ");
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)p)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	if (usb_ctrl->bRequest == GET_MAX_LUN_REQUEST &&
	    usb_ctrl->wLength != 1) {
		debug_cond(DEBUG_SETUP != 0,
			   "\t%s:GET_MAX_LUN_REQUEST:invalid",
			   __func__);
		debug_cond(DEBUG_SETUP != 0,
			   "wLength = %d, setup returned\n",
			   usb_ctrl->wLength);

		dwc2_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	} else if (usb_ctrl->bRequest == BOT_RESET_REQUEST &&
		   usb_ctrl->wLength != 0) {
		/* Bulk-Only mass storage reset class-specific request */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:BOT Reset:invalid wLength =%d, setup returned\n",
			   __func__, usb_ctrl->wLength);

		dwc2_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	}

	/* Set direction of EP0 */
	if (likely(usb_ctrl->bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
	}
	/* cope with automagic for some standard requests. */
	dev->req_std = (usb_ctrl->bRequestType & USB_TYPE_MASK)
		== USB_TYPE_STANDARD;

	dev->req_pending = 1;

	/* Handle some SETUP packets ourselves */
	if (dev->req_std) {
		switch (usb_ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_ADDRESS (%d)\n",
				   __func__, usb_ctrl->wValue);
			if (usb_ctrl->bRequestType
			    != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
				break;

			udc_set_address(dev, usb_ctrl->wValue);
			return;

		case USB_REQ_SET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "=====================================\n");
			debug_cond(DEBUG_SETUP != 0,
				   "%s: USB_REQ_SET_CONFIGURATION (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_DEVICE)
				reset_available = 1;

			break;

		case USB_REQ_GET_DESCRIPTOR:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_DESCRIPTOR\n",
				   __func__);
			break;

		case USB_REQ_SET_INTERFACE:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_INTERFACE (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_INTERFACE)
				reset_available = 1;

			break;

		case USB_REQ_GET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_CONFIGURATION\n",
				   __func__);
			break;

		case USB_REQ_GET_STATUS:
			if (!dwc2_udc_get_status(dev, usb_ctrl))
				return;

			break;

		case USB_REQ_CLEAR_FEATURE:
			ep_num = usb_ctrl->wIndex & 0x3;

			if (!dwc2_udc_clear_feature(&dev->ep[ep_num].ep))
				return;

			break;

		case USB_REQ_SET_FEATURE:
			ep_num = usb_ctrl->wIndex & 0x3;

			if (!dwc2_udc_set_feature(&dev->ep[ep_num].ep))
				return;

			break;

		default:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** Default of usb_ctrl->bRequest=0x%x"
				   "happened.\n", __func__, usb_ctrl->bRequest);
			break;
		}
	}

	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command,
		 * process immediately */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:usb_ctrlreq will be passed to fsg_setup()\n",
			   __func__);

		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, usb_ctrl);
		spin_lock(&dev->lock);

		if (i < 0) {
			/* setup processing failed, force stall */
			dwc2_udc_ep0_set_stall(ep);
			dev->ep0state = WAIT_FOR_SETUP;

			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->driver->setup failed (%d),"
				   " bRequest = %d\n",
				   i, usb_ctrl->bRequest);
		} else if (dev->req_pending) {
			dev->req_pending = 0;
			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->req_pending...\n");
		}

		debug_cond(DEBUG_SETUP != 0,
			   "\tep0state = %s\n", state_names[dev->ep0state]);
	}
}

/*
 * handle ep0 interrupt
 */
static void dwc2_handle_ep0(struct dwc2_udc *dev)
{
	if (dev->ep0state == WAIT_FOR_SETUP) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: WAIT_FOR_SETUP\n", __func__);
		dwc2_ep0_setup(dev);
	} else {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: strange state!!(state = %s)\n",
			   __func__, state_names[dev->ep0state]);
	}
}

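/* Start the EP0 data stage in the direction indicated by the SETUP packet. */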
static void dwc2_ep0_kick(struct dwc2_udc *dev, struct dwc2_ep *ep)
{
	debug_cond(DEBUG_EP0 != 0,
		   "%s: ep_is_in = %d\n", __func__, ep_is_in(ep));
	if (ep_is_in(ep)) {
		dev->ep0state = DATA_STATE_XMIT;
		dwc2_ep0_write(dev);
	} else {
		dev->ep0state = DATA_STATE_RECV;
		dwc2_ep0_read(dev);
	}
}