/*
 * Copyright (C) 2010 - 2019 Xilinx, Inc.
 * Copyright (C) 2021 WangHuachen.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 */

#include "lwipopts.h"
#include "lwip/stats.h"
#include "lwip/sys.h"
#include "lwip/inet_chksum.h"

#include "netif/xadapter.h"
#include "netif/xemacpsif.h"
#include "xstatus.h"

#include "xlwipconfig.h"
#include "xparameters.h"
#include "xparameters_ps.h"
// #include "xil_exception.h"
#include "xil_mmu.h"
#if defined (ARMR5)
#include "xreg_cortexr5.h"
#endif
#ifdef CONFIG_XTRACE
#include "xtrace.h"
#endif
#ifdef OS_IS_FREERTOS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#endif

#include <stdio.h>

#define INTC_BASE_ADDR XPAR_SCUGIC_0_CPU_BASEADDR
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_0_DIST_BASEADDR

/* Byte alignment of BDs */
#define BD_ALIGNMENT (XEMACPS_DMABD_MINIMUM_ALIGNMENT*2)

/* At most 4 different Ethernet interfaces are supported */
static UINTPTR tx_pbufs_storage[4*XLWIP_CONFIG_N_TX_DESC];
static UINTPTR rx_pbufs_storage[4*XLWIP_CONFIG_N_RX_DESC];

static s32_t emac_intr_num;

/******************************************************************************
 * Each BD is 8 bytes in size, and the BDs (the BD chain) need to be placed
 * at an uncached memory location. If they are not placed at uncached
 * locations, the user needs to flush or invalidate the cache for each
 * BD/packet. However, a flush or invalidate operates on whole cache lines,
 * which can span multiple BDs. This means a flush or invalidate of one BD can
 * actually flush/invalidate multiple BDs adjacent to the targeted BD. Since
 * both the user and the hardware update the BD fields, such an operation from
 * the user can potentially overwrite updates made by the hardware or the user.
 * To avoid this, it is always safe to place the Rx and Tx BD chains at
 * uncached memory locations.
 *
 * The Xilinx standalone BSP for Cortex-A9 implements only primary page tables.
 * Each table entry corresponds to 1 MB of the address map. This means that if
 * a memory region has to be made uncached, the minimum granularity is 1 MB.
 *
 * The implementation below allocates a 1 MB u8 array aligned to 1 MB.
 * This ensures that the array is placed at a 1 MB aligned address
 * (e.g. 0x1200000) and occupies 1 MB of memory. The init_dma function then
 * changes this 1 MB region to uncached (strongly ordered).
 * This increases the bss section of the program significantly and can be a
 * waste of memory: the BDs will hardly occupy more than a few KB, and the
 * rest of the 1 MB is unused.
 *
 * If a program uses other peripherals that have DMAs/bus masters and need
 * uncached memory, they may end up following the same approach, which
 * aggravates the memory waste. To avoid this, the user can create a new 1 MB
 * section in the linker script and reserve it for use cases that need
 * uncached memory. They can then implement their own memory allocation logic
 * in the application that hands out uncached memory from this 1 MB region.
 * For such a case, this file needs to be changed so that uncached memory
 * allocated through those other means is used instead.
 *
 * The present implementation allocates 1 MB of uncached memory and reserves
 * 64 KB of it for each BD chain. 64 KB holds 8192 BDs per chain, which is
 * more than enough for any application. Assuming that both emac0 and emac1
 * are present, 256 KB of memory is allocated for BDs and the remaining
 * 768 KB is simply unused.
 *********************************************************************************/
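/* The comment above suggests a dedicated linker-script section plus a small
 * application-side allocator as the memory-friendly alternative. The sketch
 * below is illustrative only and is kept out of the build with #if 0: the
 * section name ".uncached" and the allocator name are assumptions, not part
 * of this port, and the section must still be mapped uncached via
 * Xil_SetTlbAttributes exactly as init_dma does for bd_space below. */
#if 0
/* Reserve the region in a dedicated, 1 MB aligned linker section. */
static u8_t uncached_pool[0x100000]
        __attribute__ ((aligned (0x100000), section (".uncached")));
static u32_t uncached_pool_offset = 0;

/* Minimal bump allocator: hands out BD_ALIGNMENT-aligned chunks and never
 * frees. Enough for BD rings, which are allocated once at init time. */
static void *uncached_alloc(u32_t nbytes)
{
    void *ptr;

    nbytes = (nbytes + BD_ALIGNMENT - 1) & ~(u32_t)(BD_ALIGNMENT - 1);
    if (uncached_pool_offset + nbytes > sizeof(uncached_pool))
        return NULL;
    ptr = &uncached_pool[uncached_pool_offset];
    uncached_pool_offset += nbytes;
    return ptr;
}
#endif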

#if defined __aarch64__
u8_t bd_space[0x200000] __attribute__ ((aligned (0x200000)));
#else
u8_t bd_space[0x100000] __attribute__ ((aligned (0x100000)));
#endif
static volatile u32_t bd_space_index = 0;
static volatile u32_t bd_space_attr_set = 0;

#ifdef OS_IS_FREERTOS
long xInsideISR = 0;
#endif

#define XEMACPS_BD_TO_INDEX(ringptr, bdptr) \
    (((UINTPTR)bdptr - (UINTPTR)(ringptr)->BaseBdAddr) / (ringptr)->Separation)
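/* Example: if the ring's Separation (the spacing between BDs) is 8 bytes,
 * a BD at BaseBdAddr + 24 maps to index 3 in the pbuf storage arrays. */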


s32_t is_tx_space_available(xemacpsif_s *emac)
{
    XEmacPs_BdRing *txring;
    s32_t freecnt = 0;

    txring = &(XEmacPs_GetTxRing(&emac->emacps));

    /* tx space is available as long as there are free BDs */
    freecnt = XEmacPs_BdRingGetFreeCnt(txring);
    return freecnt;
}


static inline
u32_t get_base_index_txpbufsstorage (xemacpsif_s *xemacpsif)
{
    u32_t index = 0; /* default to instance 0 if no base address matches */
#ifdef XPAR_XEMACPS_0_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
        index = 0;
    }
#endif
#ifdef XPAR_XEMACPS_1_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
        index = XLWIP_CONFIG_N_TX_DESC;
    }
#endif
#ifdef XPAR_XEMACPS_2_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
        index = 2 * XLWIP_CONFIG_N_TX_DESC;
    }
#endif
#ifdef XPAR_XEMACPS_3_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
        index = 3 * XLWIP_CONFIG_N_TX_DESC;
    }
#endif
    return index;
}

static inline
u32_t get_base_index_rxpbufsstorage (xemacpsif_s *xemacpsif)
{
    u32_t index = 0; /* default to instance 0 if no base address matches */
#ifdef XPAR_XEMACPS_0_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
        index = 0;
    }
#endif
#ifdef XPAR_XEMACPS_1_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
        index = XLWIP_CONFIG_N_RX_DESC;
    }
#endif
#ifdef XPAR_XEMACPS_2_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
        index = 2 * XLWIP_CONFIG_N_RX_DESC;
    }
#endif
#ifdef XPAR_XEMACPS_3_BASEADDR
    if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
        index = 3 * XLWIP_CONFIG_N_RX_DESC;
    }
#endif
    return index;
}
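/* Example: with XLWIP_CONFIG_N_TX_DESC == 64, the two helpers above map GEM0
 * to slots 0..63 and GEM1 to slots 64..127 of tx_pbufs_storage (and likewise
 * for the RX array), so each interface gets one storage slot per BD. */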

void process_sent_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *txring)
{
    XEmacPs_Bd *txbdset;
    XEmacPs_Bd *curbdpntr;
    s32_t n_bds;
    XStatus status;
    s32_t n_pbufs_freed = 0;
    u32_t bdindex;
    struct pbuf *p;
    u32 *temp;
    u32_t index;

    index = get_base_index_txpbufsstorage (xemacpsif);

    while (1) {
        /* obtain processed BDs */
        n_bds = XEmacPs_BdRingFromHwTx(txring,
                XLWIP_CONFIG_N_TX_DESC, &txbdset);
        if (n_bds == 0) {
            return;
        }
        /* free the processed BDs */
        n_pbufs_freed = n_bds;
        curbdpntr = txbdset;
        while (n_pbufs_freed > 0) {
            bdindex = XEMACPS_BD_TO_INDEX(txring, curbdpntr);
            temp = (u32 *)curbdpntr;
            *temp = 0; /* clear the buffer address word */
            temp++;
            /* reset the status word: set the USED bit, plus the WRAP bit
             * on the last BD of the ring */
            if (bdindex == (XLWIP_CONFIG_N_TX_DESC - 1)) {
                *temp = 0xC0000000;
            } else {
                *temp = 0x80000000;
            }
            dsb();
            p = (struct pbuf *)tx_pbufs_storage[index + bdindex];
            if (p != NULL) {
                pbuf_free(p);
            }
            tx_pbufs_storage[index + bdindex] = 0;
            curbdpntr = XEmacPs_BdRingNext(txring, curbdpntr);
            n_pbufs_freed--;
            dsb();
        }

        status = XEmacPs_BdRingFree(txring, n_bds, txbdset);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("Failure while freeing in Tx Done ISR\r\n"));
        }
    }
    return;
}

void emacps_send_handler(void *arg)
{
    struct xemac_s *xemac;
    xemacpsif_s *xemacpsif;
    XEmacPs_BdRing *txringptr;
    u32_t regval;
#ifdef OS_IS_FREERTOS
    xInsideISR++;
#endif
    xemac = (struct xemac_s *)(arg);
    xemacpsif = (xemacpsif_s *)(xemac->state);
    txringptr = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
    regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_TXSR_OFFSET);
    XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_TXSR_OFFSET, regval);

    /* If the Transmit Done interrupt is asserted, process the completed BDs. */
    /* RT-Thread does not support freeing memory in interrupt context, so the
     * call is left commented out here; completed BDs must be reclaimed from
     * thread context instead. */
    // process_sent_bds(xemacpsif, txringptr);
#ifdef OS_IS_FREERTOS
    xInsideISR--;
#endif
}
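/* A minimal sketch of the thread-context reclamation mentioned above
 * (hypothetical call site; the actual caller lives outside this file):
 * mask the EMAC interrupt, reclaim finished BDs, then queue the next frame.
 *
 *     emac_disable_intr();
 *     process_sent_bds(xemacpsif, &(XEmacPs_GetTxRing(&xemacpsif->emacps)));
 *     emac_enable_intr();
 *     if (emacps_sgsend(xemacpsif, p) != XST_SUCCESS) {
 *         // drop the frame or retry later
 *     }
 */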

XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
    struct pbuf *q;
    s32_t n_pbufs;
    XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
    XEmacPs_Bd *temp_txbd;
    XStatus status;
    XEmacPs_BdRing *txring;
    u32_t bdindex;
    u32_t lev;
    u32_t index;
    u32_t max_fr_size;

    /* mask IRQ and FIQ (CPSR I and F bits) while manipulating the TX ring */
    lev = mfcpsr();
    mtcpsr(lev | 0x000000C0);

    txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

    index = get_base_index_txpbufsstorage (xemacpsif);

    /* first count the number of pbufs */
    for (q = p, n_pbufs = 0; q != NULL; q = q->next)
        n_pbufs++;

    /* obtain that many BDs */
    status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
    if (status != XST_SUCCESS) {
        mtcpsr(lev);
        LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
        return XST_FAILURE;
    }

    for (q = p, txbd = txbdset; q != NULL; q = q->next) {
        bdindex = XEMACPS_BD_TO_INDEX(txring, txbd);
        if (tx_pbufs_storage[index + bdindex] != 0) {
            mtcpsr(lev);
            LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
            return XST_FAILURE;
        }

        /* Send the data from the pbuf to the interface, one pbuf at a
           time. The size of the data in each pbuf is kept in the ->len
           variable. */
        if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
            Xil_DCacheFlushRange((UINTPTR)q->payload, (UINTPTR)q->len);
        }

        XEmacPs_BdSetAddressTx(txbd, (UINTPTR)q->payload);

        /* maximum payload is the frame size minus the 18 bytes of Ethernet
         * header and FCS */
#ifdef ZYNQMP_USE_JUMBO
        max_fr_size = MAX_FRAME_SIZE_JUMBO - 18;
#else
        max_fr_size = XEMACPS_MAX_FRAME_SIZE - 18;
#endif
        if (q->len > max_fr_size)
            XEmacPs_BdSetLength(txbd, max_fr_size & 0x3FFF);
        else
            XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

        tx_pbufs_storage[index + bdindex] = (UINTPTR)q;

        pbuf_ref(q);
        last_txbd = txbd;
        XEmacPs_BdClearLast(txbd);
        txbd = XEmacPs_BdRingNext(txring, txbd);
    }
    XEmacPs_BdSetLast(last_txbd);
    /* For fragmented packets, remember the first BD allocated for the first
       packet fragment. The used bit for this BD should be cleared at the end,
       after clearing the used bits of the other fragments. For packets without
       fragments, just remember the allocated BD. */
    temp_txbd = txbdset;
    txbd = txbdset;
    txbd = XEmacPs_BdRingNext(txring, txbd);
    q = p->next;
    for (; q != NULL; q = q->next) {
        XEmacPs_BdClearTxUsed(txbd);
        dsb();
        txbd = XEmacPs_BdRingNext(txring, txbd);
    }
    XEmacPs_BdClearTxUsed(temp_txbd);
    dsb();

    status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
    if (status != XST_SUCCESS) {
        mtcpsr(lev);
        LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
        return XST_FAILURE;
    }
    /* Start transmit */
    XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
            XEMACPS_NWCTRL_OFFSET,
            (XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
            XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));

    mtcpsr(lev);
    return status;
}
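/* Note on pbuf ownership: emacps_sgsend takes a reference on every pbuf it
 * maps (pbuf_ref above), and process_sent_bds drops that reference once the
 * hardware has consumed the BD. A hypothetical caller would typically guard
 * the send with the free-BD count, e.g.:
 *
 *     if (is_tx_space_available(xemacpsif) < n_pbufs) {
 *         // ring full: drop the frame or retry later
 *     }
 */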

void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring)
{
    XEmacPs_Bd *rxbd;
    XStatus status;
    struct pbuf *p;
    u32_t freebds;
    u32_t bdindex;
    u32 *temp;
    u32_t index;

    index = get_base_index_rxpbufsstorage (xemacpsif);

    freebds = XEmacPs_BdRingGetFreeCnt (rxring);
    while (freebds > 0) {
        freebds--;
#ifdef ZYNQMP_USE_JUMBO
        p = pbuf_alloc(PBUF_RAW, MAX_FRAME_SIZE_JUMBO, PBUF_POOL);
#else
        p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
#endif
        if (!p) {
#if LINK_STATS
            lwip_stats.link.memerr++;
            lwip_stats.link.drop++;
#endif
            rt_kprintf("unable to alloc pbuf in recv_handler\r\n");
            return;
        }
        status = XEmacPs_BdRingAlloc(rxring, 1, &rxbd);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
            pbuf_free(p);
            return;
        }
        status = XEmacPs_BdRingToHw(rxring, 1, rxbd);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
            if (status == XST_DMA_SG_LIST_ERROR) {
                LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
            }
            else {
                LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BDs in the set has 0 as its length value\r\n"));
            }

            pbuf_free(p);
            XEmacPs_BdRingUnAlloc(rxring, 1, rxbd);
            return;
        }
#ifdef ZYNQMP_USE_JUMBO
        if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
            Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)MAX_FRAME_SIZE_JUMBO);
        }
#else
        if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
            Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)XEMACPS_MAX_FRAME_SIZE);
        }
#endif
        bdindex = XEMACPS_BD_TO_INDEX(rxring, rxbd);
        temp = (u32 *)rxbd;
        /* clear the address word; set the WRAP bit (bit 1) on the last BD
         * of the ring so the controller wraps back to the ring base */
        if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
            *temp = 0x00000002;
        } else {
            *temp = 0;
        }
        temp++;
        *temp = 0; /* clear the status word */
        dsb();

        XEmacPs_BdSetAddressRx(rxbd, (UINTPTR)p->payload);
        rx_pbufs_storage[index + bdindex] = (UINTPTR)p;
    }
}

void emacps_recv_handler(void *arg)
{
    struct pbuf *p;
    XEmacPs_Bd *rxbdset, *curbdptr;
    struct xemac_s *xemac;
    xemacpsif_s *xemacpsif;
    XEmacPs_BdRing *rxring;
    volatile s32_t bd_processed;
    s32_t rx_bytes, k;
    u32_t bdindex;
    u32_t regval;
    u32_t index;
    u32_t gigeversion;

    xemac = (struct xemac_s *)(arg);
    xemacpsif = (xemacpsif_s *)(xemac->state);
    rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps);

#ifdef OS_IS_FREERTOS
    xInsideISR++;
#endif

    /* GEM version, from the module ID register at offset 0xFC (bits 27:16) */
    gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
    index = get_base_index_rxpbufsstorage (xemacpsif);
    /*
     * If the Reception Done interrupt is asserted, call the RX callback
     * function to handle the processed BDs and then raise the corresponding
     * flag.
     */
    regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET);
    XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET, regval);
    if (gigeversion <= 2) {
        resetrx_on_no_rxdata(xemacpsif);
    }

    while (1) {

        bd_processed = XEmacPs_BdRingFromHwRx(rxring, XLWIP_CONFIG_N_RX_DESC, &rxbdset);
        if (bd_processed <= 0) {
            break;
        }
        for (k = 0, curbdptr = rxbdset; k < bd_processed; k++) {

            bdindex = XEMACPS_BD_TO_INDEX(rxring, curbdptr);
            p = (struct pbuf *)rx_pbufs_storage[index + bdindex];
            /*
             * Adjust the buffer size to the actual number of bytes received.
             */
#ifdef ZYNQMP_USE_JUMBO
            rx_bytes = XEmacPs_GetRxFrameSize(&xemacpsif->emacps, curbdptr);
#else
            rx_bytes = XEmacPs_BdGetLength(curbdptr);
#endif
            pbuf_realloc(p, rx_bytes);
            /* Invalidate the RX frame before queuing to handle
             * L1 cache prefetch conditions on any architecture.
             */
            Xil_DCacheInvalidateRange((UINTPTR)p->payload, rx_bytes);
            /* store it in the receive queue,
             * where it'll be processed by a different handler
             */
            if (pq_enqueue(xemacpsif->recv_q, (void *)p) < 0) {
#if LINK_STATS
                lwip_stats.link.memerr++;
                lwip_stats.link.drop++;
#endif
                pbuf_free(p);
            }
            curbdptr = XEmacPs_BdRingNext(rxring, curbdptr);
        }
        /* free up the BDs */
        XEmacPs_BdRingFree(rxring, bd_processed, rxbdset);
        setup_rx_bds(xemacpsif, rxring);
        /* notify the RT-Thread Ethernet device that frames are ready */
        eth_device_ready(xemac->rt_eth_device);
    }

#ifdef OS_IS_FREERTOS
    xInsideISR--;
#endif
    return;
}
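/* The pbufs queued via pq_enqueue above are drained outside interrupt
 * context. A minimal sketch of a consumer (hypothetical; the real one lives
 * in the netif glue code), with the EMAC interrupt masked around the
 * non-reentrant queue operation:
 *
 *     struct pbuf *p;
 *     emac_disable_intr();
 *     p = (struct pbuf *)pq_dequeue(xemacpsif->recv_q);
 *     emac_enable_intr();
 *     // p is NULL when the queue is empty
 */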

void clean_dma_txdescs(struct xemac_s *xemac)
{
    XEmacPs_Bd bdtemplate;
    XEmacPs_BdRing *txringptr;
    xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);

    txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);

    XEmacPs_BdClear(&bdtemplate);
    XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);

    /*
     * Create the TxBD ring
     */
    XEmacPs_BdRingCreate(txringptr, (UINTPTR) xemacpsif->tx_bdspace,
            (UINTPTR) xemacpsif->tx_bdspace, BD_ALIGNMENT,
            XLWIP_CONFIG_N_TX_DESC);
    XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
}

XStatus init_dma(struct xemac_s *xemac)
{
    XEmacPs_Bd bdtemplate;
    XEmacPs_BdRing *rxringptr, *txringptr;
    XEmacPs_Bd *rxbd;
    struct pbuf *p;
    XStatus status;
    s32_t i;
    u32_t bdindex;
    volatile UINTPTR tempaddress;
    u32_t index;
    u32_t gigeversion;
    XEmacPs_Bd *bdtxterminate = NULL;
    XEmacPs_Bd *bdrxterminate = NULL;
    u32 *temp;

    xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
    struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];

    index = get_base_index_rxpbufsstorage (xemacpsif);
    gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
    /*
     * The BDs need to be allocated in uncached memory. Hence the 1 MB
     * address range allocated for bd_space is made uncached
     * by setting appropriate attributes in the translation table.
     * bd_space is aligned to 1 MB and has a size of 1 MB. This ensures
     * a reserved uncached area used only for BDs.
     */
    if (bd_space_attr_set == 0) {
#if defined (ARMR5)
        Xil_SetTlbAttributes((s32_t)bd_space, STRONG_ORDERD_SHARED | PRIV_RW_USER_RW); // addr, attr
#else
#if defined __aarch64__
        Xil_SetTlbAttributes((u64)bd_space, NORM_NONCACHE | INNER_SHAREABLE);
#else
        Xil_SetTlbAttributes((s32_t)bd_space, DEVICE_MEMORY); // addr, attr
#endif
#endif
        bd_space_attr_set = 1;
    }

    rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
    txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
    LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
    LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));

    /* Allocate 64 KB each for the Rx and Tx BDs to take care of extreme cases */
    tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
    xemacpsif->rx_bdspace = (void *)tempaddress;
    bd_space_index += 0x10000;
    tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
    xemacpsif->tx_bdspace = (void *)tempaddress;
    bd_space_index += 0x10000;
    if (gigeversion > 2) {
        tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
        bdrxterminate = (XEmacPs_Bd *)tempaddress;
        bd_space_index += 0x10000;
        tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
        bdtxterminate = (XEmacPs_Bd *)tempaddress;
        bd_space_index += 0x10000;
    }

    LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: %p \r\n", xemacpsif->rx_bdspace));
    LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: %p \r\n", xemacpsif->tx_bdspace));

    if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
        xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
                __FILE__, __LINE__);
        return ERR_IF;
    }

    /*
     * Setup RxBD space.
     *
     * Setup a BD template for the Rx channel. This template will be copied to
     * every RxBD. We will not have to explicitly set these again.
     */
    XEmacPs_BdClear(&bdtemplate);

    /*
     * Create the RxBD ring
     */

    status = XEmacPs_BdRingCreate(rxringptr, (UINTPTR) xemacpsif->rx_bdspace,
            (UINTPTR) xemacpsif->rx_bdspace, BD_ALIGNMENT,
            XLWIP_CONFIG_N_RX_DESC);

    if (status != XST_SUCCESS) {
        LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
        return ERR_IF;
    }

    status = XEmacPs_BdRingClone(rxringptr, &bdtemplate, XEMACPS_RECV);
    if (status != XST_SUCCESS) {
        LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
        return ERR_IF;
    }

    XEmacPs_BdClear(&bdtemplate);
    XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
    /*
     * Create the TxBD ring
     */
    status = XEmacPs_BdRingCreate(txringptr, (UINTPTR) xemacpsif->tx_bdspace,
            (UINTPTR) xemacpsif->tx_bdspace, BD_ALIGNMENT,
            XLWIP_CONFIG_N_TX_DESC);

    if (status != XST_SUCCESS) {
        return ERR_IF;
    }

    /* We reuse the bd template, as the same one will work for both rx and tx. */
    status = XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
    if (status != XST_SUCCESS) {
        return ERR_IF;
    }

    /*
     * Allocate RX descriptors, 1 RxBD at a time.
     */
    for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
#ifdef ZYNQMP_USE_JUMBO
        p = pbuf_alloc(PBUF_RAW, MAX_FRAME_SIZE_JUMBO, PBUF_POOL);
#else
        p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
#endif
        if (!p) {
#if LINK_STATS
            lwip_stats.link.memerr++;
            lwip_stats.link.drop++;
#endif
            rt_kprintf("unable to alloc pbuf in init_dma\r\n");
            return ERR_IF;
        }
        status = XEmacPs_BdRingAlloc(rxringptr, 1, &rxbd);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
            pbuf_free(p);
            return ERR_IF;
        }
        /* Enqueue to HW */
        status = XEmacPs_BdRingToHw(rxringptr, 1, rxbd);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
            pbuf_free(p);
            XEmacPs_BdRingUnAlloc(rxringptr, 1, rxbd);
            return ERR_IF;
        }

        bdindex = XEMACPS_BD_TO_INDEX(rxringptr, rxbd);
        temp = (u32 *)rxbd;
        *temp = 0;
        /* set the WRAP bit (bit 1) on the last BD of the ring */
        if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
            *temp = 0x00000002;
        }
        temp++;
        *temp = 0;
        dsb();
#ifdef ZYNQMP_USE_JUMBO
        if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
            Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)MAX_FRAME_SIZE_JUMBO);
        }
#else
        if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
            Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)XEMACPS_MAX_FRAME_SIZE);
        }
#endif
        XEmacPs_BdSetAddressRx(rxbd, (UINTPTR)p->payload);

        rx_pbufs_storage[index + bdindex] = (UINTPTR)p;
    }
    XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.RxBdRing.BaseBdAddr, 0, XEMACPS_RECV);
    if (gigeversion > 2) {
        XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, 1, XEMACPS_SEND);
    } else {
        XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, 0, XEMACPS_SEND);
    }
    if (gigeversion > 2)
    {
        /*
         * This version of GEM supports priority queuing, and the current
         * driver uses tx priority queue 1 and the normal rx queue for
         * packet transmit and receive. The code below parks the other
         * queue pointers in a known state to prevent the controller from
         * malfunctioning by fetching descriptors from those queues.
         */
        XEmacPs_BdClear(bdrxterminate);
        XEmacPs_BdSetAddressRx(bdrxterminate, (XEMACPS_RXBUF_NEW_MASK |
                XEMACPS_RXBUF_WRAP_MASK));
        XEmacPs_Out32((xemacpsif->emacps.Config.BaseAddress + XEMACPS_RXQ1BASE_OFFSET),
                (UINTPTR)bdrxterminate);
        XEmacPs_BdClear(bdtxterminate);
        XEmacPs_BdSetStatus(bdtxterminate, (XEMACPS_TXBUF_USED_MASK |
                XEMACPS_TXBUF_WRAP_MASK));
        XEmacPs_Out32((xemacpsif->emacps.Config.BaseAddress + XEMACPS_TXQBASE_OFFSET),
                (UINTPTR)bdtxterminate);
    }

    /*
     * Connect the device driver handler that will be called when an
     * interrupt for the device occurs; the handler defined above performs
     * the specific interrupt processing for the device.
     */
    // XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
    //         (Xil_ExceptionHandler)XEmacPs_IntrHandler,
    //         (void *)&xemacpsif->emacps);
    /*
     * Enable the interrupt for emacps.
     */
    // XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
    emac_intr_num = (u32) xtopologyp->scugic_emac_intr;
    return 0;
}

/*
 * resetrx_on_no_rxdata():
 *
 * This function is called at regular intervals through the API
 * xemacpsif_resetrx_on_no_rxdata, which is invoked by the user.
 * The EmacPs has a HW bug (SI# 692601) on the Rx path for heavy Rx traffic.
 * Under heavy Rx traffic, because of this HW bug, there are times when the Rx
 * path becomes unresponsive. The workaround is to check the Rx path for
 * traffic (by reading the stats registers regularly). If the stats register
 * does not increment for some time (indicating no Rx traffic), the function
 * resets the Rx data path.
 *
 */

void resetrx_on_no_rxdata(xemacpsif_s *xemacpsif)
{
    u32_t regctrl;
    u32_t tempcntr;
    u32_t gigeversion;

    gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
    if (gigeversion == 2) {
        tempcntr = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXCNT_OFFSET);
        if ((!tempcntr) && (!(xemacpsif->last_rx_frms_cntr))) {
            /* no frames received since the last check: toggle the RX enable
             * bit to reset the Rx data path */
            regctrl = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
                    XEMACPS_NWCTRL_OFFSET);
            regctrl &= (~XEMACPS_NWCTRL_RXEN_MASK);
            XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
                    XEMACPS_NWCTRL_OFFSET, regctrl);
            regctrl = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_NWCTRL_OFFSET);
            regctrl |= (XEMACPS_NWCTRL_RXEN_MASK);
            XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_NWCTRL_OFFSET, regctrl);
        }
        xemacpsif->last_rx_frms_cntr = tempcntr;
    }
}
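/* A minimal sketch of the periodic invocation mentioned above, using an
 * RT-Thread software timer (hypothetical; the wrapper API and its argument
 * live outside this file):
 *
 *     static void rx_watchdog(void *param)
 *     {
 *         xemacpsif_resetrx_on_no_rxdata((struct netif *)param);
 *     }
 *
 *     rt_timer_t t = rt_timer_create("rxwdt", rx_watchdog, netif,
 *             RT_TICK_PER_SECOND, RT_TIMER_FLAG_PERIODIC);
 *     if (t != RT_NULL)
 *         rt_timer_start(t);
 */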

void free_txrx_pbufs(xemacpsif_s *xemacpsif)
{
    s32_t index;
    s32_t index1;
    struct pbuf *p;

    index1 = get_base_index_txpbufsstorage (xemacpsif);

    for (index = index1; index < (index1 + XLWIP_CONFIG_N_TX_DESC); index++) {
        if (tx_pbufs_storage[index] != 0) {
            p = (struct pbuf *)tx_pbufs_storage[index];
            pbuf_free(p);
            tx_pbufs_storage[index] = 0;
        }
    }

    /* walk the RX slots with the RX base index and RX descriptor count */
    index1 = get_base_index_rxpbufsstorage (xemacpsif);
    for (index = index1; index < (index1 + XLWIP_CONFIG_N_RX_DESC); index++) {
        if (rx_pbufs_storage[index] != 0) {
            p = (struct pbuf *)rx_pbufs_storage[index];
            pbuf_free(p);
            rx_pbufs_storage[index] = 0;
        }
    }
}

void free_onlytx_pbufs(xemacpsif_s *xemacpsif)
{
    s32_t index;
    s32_t index1;
    struct pbuf *p;

    index1 = get_base_index_txpbufsstorage (xemacpsif);
    for (index = index1; index < (index1 + XLWIP_CONFIG_N_TX_DESC); index++) {
        if (tx_pbufs_storage[index] != 0) {
            p = (struct pbuf *)tx_pbufs_storage[index];
            pbuf_free(p);
            tx_pbufs_storage[index] = 0;
        }
    }
}

/* reset Tx and Rx DMA pointers after XEmacPs_Stop */
void reset_dma(struct xemac_s *xemac)
{
    u8 txqueuenum;
    u32_t gigeversion;
    xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
    XEmacPs_BdRing *txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
    XEmacPs_BdRing *rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);

    XEmacPs_BdRingPtrReset(txringptr, xemacpsif->tx_bdspace);
    XEmacPs_BdRingPtrReset(rxringptr, xemacpsif->rx_bdspace);

    gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
    if (gigeversion > 2) {
        txqueuenum = 1;
    } else {
        txqueuenum = 0;
    }

    XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.RxBdRing.BaseBdAddr, 0, XEMACPS_RECV);
    XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, txqueuenum, XEMACPS_SEND);
}

void emac_disable_intr(void)
{
    // XScuGic_DisableIntr(INTC_DIST_BASE_ADDR, emac_intr_num);
    rt_hw_interrupt_mask(emac_intr_num);
}

void emac_enable_intr(void)
{
    // XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, emac_intr_num);
    rt_hw_interrupt_umask(emac_intr_num);
}
