/*
 * Copyright (c) 2014 Christopher Anderson
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <assert.h>
#include <lk/console_cmd.h>
#include <lk/debug.h>
#include <lk/list.h>
#include <lk/err.h>
#include <errno.h>
#include <lk/reg.h>
#include <endian.h>
#include <stdio.h>
#include <string.h>
#include <malloc.h>
#include <lk/trace.h>
#include <lk/bits.h>
#include <lk/pow2.h>
#include <sys/types.h>
#include <lib/cbuf.h>
#include <kernel/timer.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <kernel/spinlock.h>
#include <kernel/debug.h>
#include <platform/interrupts.h>
#include <platform/debug.h>
#include <platform/gem.h>
#include <platform.h>
#include <kernel/event.h>
#include <kernel/semaphore.h>

#include <lib/pktbuf.h>
#include <lib/pool.h>

#define LOCAL_TRACE         0

/* Allow targets to override these values */
#ifndef GEM_RX_DESC_CNT
#define GEM_RX_DESC_CNT     32
#endif

#ifndef GEM_TX_DESC_CNT
#define GEM_TX_DESC_CNT     32
#endif

#ifndef GEM_RX_BUF_SIZE
#define GEM_RX_BUF_SIZE     1536
#endif

#ifndef GEM_TX_BUF_SIZE
#define GEM_TX_BUF_SIZE     1536
#endif

pool_t rx_buf_pool;
static spin_lock_t lock = SPIN_LOCK_INITIAL_VALUE;

struct gem_desc {
    uint32_t addr;
    uint32_t ctrl;
};

/* Quick overview:
 * RX:
 *  rx_tbl contains the rx descriptors. A pktbuf is allocated for each of these and a
 *  descriptor entry in the table points to a buffer in the pktbuf. rx_tbl[X]'s pktbuf
 *  is stored in rx_pbufs[X].
 *
 * TX:
 *  The current position to write new tx descriptors to is maintained by gem.tx_head. As frames
 *  are queued in tx_tbl their pktbufs are stored in the list queued_pbufs. As frame transmission
 *  is completed these pktbufs are released back to the pool by the interrupt handler for
 *  TX_COMPLETE.
 */
struct gem_descs {
    struct gem_desc rx_tbl[GEM_RX_DESC_CNT];
    struct gem_desc tx_tbl[GEM_TX_DESC_CNT];
};

struct gem_state {
    volatile struct gem_regs *regs;

    struct gem_descs *descs;
    paddr_t descs_phys;

    unsigned int tx_head;
    unsigned int tx_tail;
    unsigned int tx_count;
    struct list_node tx_queue;
    struct list_node queued_pbufs;

    gem_cb_t rx_callback;
    event_t rx_pending;
    event_t tx_complete;
    bool debug_rx;
    pktbuf_t *rx_pbufs[GEM_RX_DESC_CNT];
};

struct gem_state gem;

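/* Dump a received frame to the console; enabled via the 'gem rx_debug'
 * console command. */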
static void debug_rx_handler(pktbuf_t *p) {
    static uint32_t pkt = 0;

    printf("[%10u] packet %u, %zu bytes:\n", (uint32_t)current_time(), ++pkt, p->dlen);
    hexdump8(p->data, p->dlen);
    putchar('\n');
}

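/* Walk forward from tx_tail, releasing the pktbufs of any frames the
 * controller has finished transmitting. The return value is accumulated
 * from pktbuf_free(); the interrupt handler treats a positive result as a
 * reason to reschedule. Called with the lock held. */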
static int free_completed_pbuf_frames(void) {
    int ret = 0;

    /* write the current tx status bits back to acknowledge / clear them */
    gem.regs->tx_status = gem.regs->tx_status;

    while (gem.tx_count > 0 &&
            (gem.descs->tx_tbl[gem.tx_tail].ctrl & TX_DESC_USED)) {

        /* release every pktbuf of the frame at tx_tail, up to and including
         * the buffer marked end-of-frame */
        bool eof;
        do {
            pktbuf_t *p = list_remove_head_type(&gem.queued_pbufs, pktbuf_t, list);
            DEBUG_ASSERT(p);
            eof = p->flags & PKTBUF_FLAG_EOF;
            ret += pktbuf_free(p, false);
        } while (!eof);

        gem.tx_tail = (gem.tx_tail + 1) % GEM_TX_DESC_CNT;
        gem.tx_count--;
    }

    return ret;
}

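/* Move frames from the software tx_queue into free slots of the hardware
 * descriptor table, then kick the controller to (re)start transmission.
 * Called with the lock held. */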
static void queue_pkts_in_tx_tbl(void) {
    pktbuf_t *p;
    unsigned int cur_pos;

    if (list_is_empty(&gem.tx_queue)) {
        return;
    }

    // XXX handle multi part buffers

    /* Queue packets in the descriptor table until we're either out of space in the table
     * or out of packets in our tx queue. Any packets left over remain in the list and are
     * queued the next time space is available. */
    while (gem.tx_count < GEM_TX_DESC_CNT &&
            ((p = list_remove_head_type(&gem.tx_queue, pktbuf_t, list)) != NULL)) {
        cur_pos = gem.tx_head;

        uint32_t addr = pktbuf_data_phys(p);
        uint32_t ctrl = gem.descs->tx_tbl[cur_pos].ctrl & TX_DESC_WRAP; /* protect the wrap bit */
        ctrl |= TX_BUF_LEN(p->dlen);

        DEBUG_ASSERT(p->flags & PKTBUF_FLAG_EOF); // a multi part buffer would race with the hardware
        if (p->flags & PKTBUF_FLAG_EOF) {
            ctrl |= TX_LAST_BUF;
        }

        /* fill in the descriptor, control word last (in case hardware is racing us) */
        gem.descs->tx_tbl[cur_pos].addr = addr;
        gem.descs->tx_tbl[cur_pos].ctrl = ctrl;

        gem.tx_head = (gem.tx_head + 1) % GEM_TX_DESC_CNT;
        gem.tx_count++;
        list_add_tail(&gem.queued_pbufs, &p->list);
    }

    DMB;
    gem.regs->net_ctrl |= NET_CTRL_START_TX;
}

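/* Queue a single pktbuf for transmission. The caller relinquishes ownership
 * of the pktbuf; it is freed back to its pool once the controller has
 * finished sending it. */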
int gem_send_raw_pkt(struct pktbuf *p) {
    status_t ret = NO_ERROR;

    if (!p || !p->dlen) {
        ret = ERR_INVALID_ARGS;
        goto err;
    }

    /* make sure the output buffer is fully written to memory before
     * placing it on the outgoing list. */

    // XXX handle multi part buffers
    arch_clean_cache_range((vaddr_t)p->data, p->dlen);

    spin_lock_saved_state_t irqstate;
    spin_lock_irqsave(&lock, irqstate);
    list_add_tail(&gem.tx_queue, &p->list);
    queue_pkts_in_tx_tbl();
    spin_unlock_irqrestore(&lock, irqstate);

err:
    return ret;
}

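/* Top-level GEM interrupt handler. Loops until the pending interrupt status
 * drains to zero, signalling the rx thread, reclaiming completed tx frames,
 * and recovering from rx overflow along the way. */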
static enum handler_return gem_int_handler(void *arg) {
    uint32_t intr_status;
    bool resched = false;

    intr_status = gem.regs->intr_status;

    spin_lock(&lock);

    while (intr_status) {
        // clear any pending status
        gem.regs->intr_status = intr_status;

        // Received an RX complete
        if (intr_status & INTR_RX_COMPLETE) {
            event_signal(&gem.rx_pending, false);

            gem.regs->rx_status |= INTR_RX_COMPLETE;

            resched = true;
        }

        if (intr_status & INTR_RX_USED_READ) {
            /* rx overflow: the controller hit a descriptor it didn't own.
             * Hand every rx descriptor back to the hardware and restart rx. */
            for (int i = 0; i < GEM_RX_DESC_CNT; i++) {
                gem.descs->rx_tbl[i].addr &= ~RX_DESC_USED;
            }

            gem.regs->rx_status &= ~RX_STATUS_BUFFER_NOT_AVAIL;
            gem.regs->net_ctrl &= ~NET_CTRL_RX_EN;
            gem.regs->net_ctrl |= NET_CTRL_RX_EN;
            printf("GEM overflow, dumping pending packets\n");
        }

        if (intr_status & INTR_TX_CORRUPT) {
            printf("tx ahb error!\n");
            if (free_completed_pbuf_frames() > 0) {
                resched = true;
            }
        }

        /* A frame has been completed so we can clean up ownership of its buffers */
        if (intr_status & INTR_TX_COMPLETE) {
            if (free_completed_pbuf_frames() > 0) {
                resched = true;
            }
        }

        /* The controller has processed packets until it hit a buffer owned by the driver */
        if (intr_status & INTR_TX_USED_READ) {
            queue_pkts_in_tx_tbl();
            gem.regs->tx_status |= TX_STATUS_USED_READ;
        }

        /* see if we have any more */
        intr_status = gem.regs->intr_status;
    }

    spin_unlock(&lock);

    return (resched) ? INT_RESCHEDULE : INT_NO_RESCHEDULE;
}

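/* Spin (bounded) until the PHY management interface reports idle.
 * Returns false if it never goes idle within the iteration budget. */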
static bool wait_for_phy_idle(void) {
    int iters = 1000;
    while (iters && !(gem.regs->net_status & NET_STATUS_PHY_MGMT_IDLE)) {
        iters--;
    }

    if (iters == 0) {
        return false;
    }

    return true;
}

static bool gem_phy_init(void) {
    return wait_for_phy_idle();
}

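/* Allocate the rx buffer pool, pair a pktbuf with each rx descriptor, mark
 * every tx descriptor as driver-owned, and point the controller's queue base
 * address registers at the descriptor tables. */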
static status_t gem_cfg_buffer_descs(void) {
    void *rx_buf_vaddr;
    status_t ret;

    if ((ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_rx_bufs",
                                    GEM_RX_DESC_CNT * GEM_RX_BUF_SIZE, (void **)&rx_buf_vaddr, 0, 0,
                                    ARCH_MMU_FLAG_CACHED)) < 0) {
        return ret;
    }

    /* Take pktbufs from the allocated target pool and assign them to the gem RX
     * descriptor table */
    pool_init(&rx_buf_pool, GEM_RX_BUF_SIZE, CACHE_LINE, GEM_RX_DESC_CNT, rx_buf_vaddr);
    for (unsigned int n = 0; n < GEM_RX_DESC_CNT; n++) {
        void *b = pool_alloc(&rx_buf_pool);
        pktbuf_t *p = pktbuf_alloc_empty();
        if (!p || !b) {
            return ERR_NO_MEMORY;
        }

        pktbuf_add_buffer(p, b, GEM_RX_BUF_SIZE, 0, PKTBUF_FLAG_CACHED, NULL, NULL);
        gem.rx_pbufs[n] = p;
        gem.descs->rx_tbl[n].addr = (uintptr_t)p->phys_base;
        gem.descs->rx_tbl[n].ctrl = 0;
    }

    /* Claim ownership of TX descriptors for the driver */
    for (unsigned i = 0; i < GEM_TX_DESC_CNT; i++) {
        gem.descs->tx_tbl[i].addr = 0;
        gem.descs->tx_tbl[i].ctrl = TX_DESC_USED;
    }

    /* Both sets of descriptors need the wrap bit set in the last entry of their tables */
    gem.descs->rx_tbl[GEM_RX_DESC_CNT-1].addr |= RX_DESC_WRAP;
    gem.descs->tx_tbl[GEM_TX_DESC_CNT-1].ctrl |= TX_DESC_WRAP;

    /* Point the controller at the offset into state's physical location for RX descs */
    gem.regs->rx_qbar = ((uintptr_t)&gem.descs->rx_tbl[0] - (uintptr_t)gem.descs) + gem.descs_phys;
    gem.regs->tx_qbar = ((uintptr_t)&gem.descs->tx_tbl[0] - (uintptr_t)gem.descs) + gem.descs_phys;

    return NO_ERROR;
}

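/* Register and unmask the interrupt for whichever GEM instance is in use,
 * then enable the interrupt sources this driver handles. */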
static void gem_cfg_ints(void) {
    uint32_t gem_base = (uintptr_t)gem.regs;

    if (gem_base == GEM0_BASE) {
        register_int_handler(ETH0_INT, gem_int_handler, NULL);
        unmask_interrupt(ETH0_INT);
    } else if (gem_base == GEM1_BASE) {
        register_int_handler(ETH1_INT, gem_int_handler, NULL);
        unmask_interrupt(ETH1_INT);
    } else {
        printf("Illegal gem periph base address 0x%08X!\n", gem_base);
        return;
    }

    /* Enable the interrupt sources we handle */
    gem.regs->intr_en = INTR_RX_COMPLETE | INTR_TX_COMPLETE | INTR_HRESP_NOT_OK | INTR_MGMT_SENT |
                        INTR_RX_USED_READ | INTR_TX_CORRUPT | INTR_TX_USED_READ | INTR_RX_OVERRUN;
}

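/* RX worker thread. Sleeps until the interrupt handler signals rx_pending,
 * then walks the rx descriptor ring, dispatching each received frame to the
 * debug dumper and/or the registered callback before handing the descriptor
 * back to the controller. */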
static int gem_rx_thread(void *arg) {
    pktbuf_t *p;
    int bp = 0;

    while (1) {
        event_wait(&gem.rx_pending);

        for (;;) {
            if (gem.descs->rx_tbl[bp].addr & RX_DESC_USED) {
                uint32_t ctrl = gem.descs->rx_tbl[bp].ctrl;

                p = gem.rx_pbufs[bp];
                p->dlen = RX_BUF_LEN(ctrl);
                p->data = p->buffer + 2;

                /* copy the checksum offloading bits */
                p->flags = 0;
                p->flags |= (BITS_SHIFT(ctrl, 23, 22) != 0) ? PKTBUF_FLAG_CKSUM_IP_GOOD : 0;
                p->flags |= (BITS_SHIFT(ctrl, 23, 22) == 1) ? PKTBUF_FLAG_CKSUM_UDP_GOOD : 0;
                p->flags |= (BITS_SHIFT(ctrl, 23, 22) == 2) ? PKTBUF_FLAG_CKSUM_TCP_GOOD : 0;

                /* invalidate any stale cache lines on the receive buffer to ensure
                 * the cpu has a fresh copy of incoming data. */
                arch_invalidate_cache_range((vaddr_t)p->data, p->dlen);

                if (unlikely(gem.debug_rx)) {
                    debug_rx_handler(p);
                }

                if (likely(gem.rx_callback)) {
                    gem.rx_callback(p);
                }

                /* make sure all dirty data is flushed out of the buffer before
                 * putting it back into the receive queue */
                arch_clean_invalidate_cache_range((vaddr_t)p->buffer, PKTBUF_SIZE);

                gem.descs->rx_tbl[bp].addr &= ~RX_DESC_USED;
                gem.descs->rx_tbl[bp].ctrl = 0;
                bp = (bp + 1) % GEM_RX_DESC_CNT;
            } else {
                break;
            }
        }
    }

    return 0;
}

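/* Periodic stats printer, spawned by the 'gem stats' console command. Runs
 * until the caller's run flag goes false. */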
static int gem_stat_thread(void *arg) {
    volatile bool *run = ((bool *)arg);
    static uint32_t frames_rx = 0, frames_tx = 0;

    while (*run) {
        frames_tx += gem.regs->frames_tx;
        frames_rx += gem.regs->frames_rx;
        printf("GEM tx_head %u, tx_tail %u, tx_count %u, tx_frames %u, rx_frames %u\n",
               gem.tx_head, gem.tx_tail, gem.tx_count, frames_tx, frames_rx);
        thread_sleep(1000);
    }

    return 0;
}

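/* Reset the selected GEM instance via the SLCR, clear its status registers,
 * disable its interrupts, and drop the descriptor queue base addresses. */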
static void gem_deinit(uintptr_t base) {
    /* reset the gem peripheral */
    uint32_t rst_mask;
    if (base == GEM0_BASE) {
        rst_mask = (1<<6) | (1<<4) | (1<<0);
    } else {
        rst_mask = (1<<7) | (1<<5) | (1<<1);
    }
    SLCR->GEM_RST_CTRL |= rst_mask;
    spin(1);
    SLCR->GEM_RST_CTRL &= ~rst_mask;

    /* Clear Network control / status registers */
    gem.regs->net_ctrl |= NET_CTRL_STATCLR;
    gem.regs->rx_status = 0x0F;
    gem.regs->tx_status = 0xFF;
    /* Disable interrupts */
    gem.regs->intr_dis  = 0x7FFFEFF;

    /* Empty out the buffer queues */
    gem.regs->rx_qbar = 0;
    gem.regs->tx_qbar = 0;
}

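/* Bring up a GEM instance: allocate uncached descriptor memory, reset and
 * reconfigure the controller, start the rx worker thread, and enable the
 * management interface plus the rx and tx paths. */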
status_t gem_init(uintptr_t gem_base) {
    status_t ret;
    uint32_t reg_val;
    thread_t *rx_thread;
    void *descs_vaddr;
    paddr_t descs_paddr;

    DEBUG_ASSERT(gem_base == GEM0_BASE || gem_base == GEM1_BASE);

    /* Data structure init */
    event_init(&gem.tx_complete, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&gem.rx_pending, false, EVENT_FLAG_AUTOUNSIGNAL);
    list_initialize(&gem.queued_pbufs);
    list_initialize(&gem.tx_queue);

    /* allocate a block of uncached contiguous memory for the peripheral descriptors */
    if ((ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_desc",
                                    sizeof(*gem.descs), &descs_vaddr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE)) < 0) {
        return ret;
    }
    descs_paddr = vaddr_to_paddr((void *)descs_vaddr);

    /* tx/rx descriptor tables and memory mapped registers */
    gem.descs = (void *)descs_vaddr;
    gem.descs_phys = descs_paddr;
    gem.regs = (struct gem_regs *)gem_base;

    /* rx background thread */
    rx_thread = thread_create("gem_rx", gem_rx_thread, NULL, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
    thread_resume(rx_thread);

    /* Bring down whatever configuration already exists so we can set up cleanly */
    gem_deinit(gem_base);
    if ((ret = gem_cfg_buffer_descs()) < 0) {
        return ret;
    }

    /* Basic gige configuration: full duplex, rx checksum offload, FCS stripping */
    reg_val  = NET_CFG_FULL_DUPLEX;
    reg_val |= NET_CFG_GIGE_EN;
    reg_val |= NET_CFG_SPEED_100;
    reg_val |= NET_CFG_RX_CHKSUM_OFFLD_EN;
    reg_val |= NET_CFG_FCS_REMOVE;
    reg_val |= NET_CFG_MDC_CLK_DIV(0x7);
    reg_val |= NET_CFG_RX_BUF_OFFSET(2);
    gem.regs->net_cfg = reg_val;

    /* Set DMA to 1600 byte rx buffer, 8KB addr space for rx, 4KB addr space for tx,
     * hw checksumming, little endian, and use INCR16 ahb bursts
     */
    reg_val  = DMA_CFG_AHB_MEM_RX_BUF_SIZE(0x19);
    reg_val |= DMA_CFG_RX_PKTBUF_MEMSZ_SEL(0x3);
    reg_val |= DMA_CFG_TX_PKTBUF_MEMSZ_SEL;
    reg_val |= DMA_CFG_CSUM_GEN_OFFLOAD_EN;
    reg_val |= DMA_CFG_AHB_FIXED_BURST_LEN(0x10);
    gem.regs->dma_cfg = reg_val;

    /* Enable VREF from GPIOB */
    SLCR_REG(GPIOB_CTRL) = 0x1;

    if (!gem_phy_init()) {
        printf("Phy not idle, aborting!\n");
        return ERR_GENERIC;
    }

    gem_cfg_ints();

    reg_val  = NET_CTRL_MD_EN;
    reg_val |= NET_CTRL_RX_EN;
    reg_val |= NET_CTRL_TX_EN;
    gem.regs->net_ctrl = reg_val;

    return NO_ERROR;
}

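/* Quiesce the controller: mask and disable interrupts and stop both the
 * rx and tx paths. */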
void gem_disable(void) {
    /* disable all the interrupts */
    gem.regs->intr_en = 0;
    mask_interrupt(ETH0_INT);

    /* stop tx and rx */
    gem.regs->net_ctrl = 0;
}

void gem_set_callback(gem_cb_t rx) {
    gem.rx_callback = rx;
}

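/* Program the primary station address. RX/TX are briefly paused while the
 * two address registers are updated so the controller never sees a
 * half-written address. */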
void gem_set_macaddr(uint8_t mac[6]) {
    uint32_t en = gem.regs->net_ctrl & (NET_CTRL_RX_EN | NET_CTRL_TX_EN);

    if (en) {
        gem.regs->net_ctrl &= ~(en);
    }

    /* _top register must be written after _bot register */
    gem.regs->spec_addr1_bot = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0];
    gem.regs->spec_addr1_top = (mac[5] << 8) | mac[4];

    if (en) {
        gem.regs->net_ctrl |= en;
    }
}
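
/* A minimal bring-up sketch from target code (the MAC address and the
 * handle_frame() handler below are illustrative placeholders, not part of
 * this driver):
 *
 *   static void eth_rx(pktbuf_t *p) {
 *       // the driver recycles p after this returns; copy out anything
 *       // that must outlive the callback
 *       handle_frame(p->data, p->dlen);
 *   }
 *
 *   uint8_t mac[6] = { 0x00, 0x0a, 0x35, 0x00, 0x00, 0x01 };
 *   gem_init(GEM0_BASE);
 *   gem_set_macaddr(mac);
 *   gem_set_callback(eth_rx);
 */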

/* Debug console commands */
static int cmd_gem(int argc, const console_cmd_args *argv) {
    static uint32_t frames_rx = 0;
    static uint32_t frames_tx = 0;
    static bool run_stats = false;
    thread_t *stat_thread;

    if (argc == 1) {
        printf("gem raw <iter> <length>: Send <iter> raw mac packets for testing\n");
        printf("gem rx_debug:      toggle RX debug output\n");
        printf("gem stats:         toggle periodic output of driver stats\n");
        printf("gem status:        print driver status\n");
    } else if (strncmp(argv[1].str, "raw", sizeof("raw")) == 0) {
        pktbuf_t *p;
        int iter;
        if (argc < 4) {
            return 0;
        }

        if ((p = pktbuf_alloc()) == NULL) {
            printf("out of buffers\n");
            return 0;
        }

        iter = argv[2].u;
        p->dlen = argv[3].u;
        while (iter--) {
            memset(p->data, iter, 12);
            gem_send_raw_pkt(p);
        }
    } else if (strncmp(argv[1].str, "status", sizeof("status")) == 0) {
        uint32_t mac_top = gem.regs->spec_addr1_top;
        uint32_t mac_bot = gem.regs->spec_addr1_bot;
        printf("mac addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
               mac_top >> 8, mac_top & 0xFF, mac_bot >> 24, (mac_bot >> 16) & 0xFF,
               (mac_bot >> 8) & 0xFF, mac_bot & 0xFF);
        uint32_t rx_used = 0, tx_used = 0;
        for (int i = 0; i < GEM_RX_DESC_CNT; i++) {
            rx_used += !!(gem.descs->rx_tbl[i].addr & RX_DESC_USED);
        }

        for (int i = 0; i < GEM_TX_DESC_CNT; i++) {
            tx_used += !!(gem.descs->tx_tbl[i].ctrl & TX_DESC_USED);
        }

        frames_tx += gem.regs->frames_tx;
        frames_rx += gem.regs->frames_rx;
        printf("rx usage: %u/%u, tx usage %u/%u\n",
               rx_used, GEM_RX_DESC_CNT, tx_used, GEM_TX_DESC_CNT);
        printf("frames rx: %u, frames tx: %u\n",
               frames_rx, frames_tx);
        printf("tx:\n");
        for (size_t i = 0; i < GEM_TX_DESC_CNT; i++) {
            uint32_t ctrl = gem.descs->tx_tbl[i].ctrl;
            uint32_t addr = gem.descs->tx_tbl[i].addr;

            printf("%3zu 0x%08X 0x%08X: len %u, %s%s%s %s%s\n",
                   i, addr, ctrl, TX_BUF_LEN(ctrl),
                   (ctrl & TX_DESC_USED) ? "driver " : "controller ",
                   (ctrl & TX_DESC_WRAP) ? "wrap " : "",
                   (ctrl & TX_LAST_BUF) ? "eof " : "",
                   (i == gem.tx_head) ? "<-- HEAD " : "",
                   (i == gem.tx_tail) ? "<-- TAIL " : "");
        }

    } else if (strncmp(argv[1].str, "stats", sizeof("stats")) == 0) {
        run_stats = !run_stats;
        if (run_stats) {
            stat_thread = thread_create("gem_stat",
                                        gem_stat_thread, &run_stats, LOW_PRIORITY, DEFAULT_STACK_SIZE);
            thread_resume(stat_thread);
        }
    } else if (strncmp(argv[1].str, "rx_debug", sizeof("rx_debug")) == 0) {
        gem.debug_rx = !gem.debug_rx;
    }

    return 0;
}

STATIC_COMMAND_START
STATIC_COMMAND("gem", "ZYNQ GEM commands", &cmd_gem)
STATIC_COMMAND_END(gem);