1 /*
2 * Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
3 *
4 * Allwinner is a trademark of Allwinner Technology Co.,Ltd., registered in
5 * the the People's Republic of China and other countries.
6 * All Allwinner Technology Co.,Ltd. trademarks are used with permission.
7 *
8 * DISCLAIMER
9 * THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
10 * IF YOU NEED TO INTEGRATE THIRD PARTY’S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
11 * IN ALLWINNERS’SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
12 * ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
13 * ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
14 * COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
15 * YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY’S TECHNOLOGY.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED BY ALLWINNER"AS IS" AND TO THE MAXIMUM EXTENT
19 * PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
20 * WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
21 * THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
22 * OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
23 * IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
28 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
30 * OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <errno.h>
34 #include <typedef.h>
35 #include <hal_mem.h>
36 #include <sunxi_hal_geth.h>
37 #include <lwip/pbuf.h>
38 #include <netif/ethernet.h>
39 #include <sunxi_hal_mii.h>
40 #include <sunxi_hal_miiphy.h>
41 #include <rtthread.h>
42 #include <netif/ethernetif.h>
43 #ifdef RT_USING_SMART
44 #include <page.h>
45 #include <ioremap.h>
46 #endif
47 #include <arch.h>
48
/* Singleton driver state for the (single) GMAC controller instance. */
static struct geth_device rt_geth_dev;
/* Consumer cursor: index of the next RX descriptor to be reaped. */
static unsigned int rx_clean = 0;
51
/*
 * Hand every RX descriptor to the DMA engine: set the own bit and
 * program the buffer-size field (low 11 bits of desc1) to its maximum.
 * The dev argument is unused; always returns 0.
 */
static int gmac_rx_desc_fill(rt_device_t dev)
{
    unsigned int idx;
    unsigned int slot = 0;
    hal_geth_dma_desc_t *desc;

    for (idx = 0; idx < DMA_DESC_RX_NUM; idx++)
    {
        desc = rt_geth_dev.get_buffer_config.dma_desc_rx + slot;
        desc->desc0.rx.own = 1;              /* DMA engine owns this slot */
        desc->desc1.all |= ((1 << 11) - 1);  /* buffer size field = 2047 */
        slot = circ_inc(slot, DMA_DESC_RX_NUM);
    }
    return 0;
}
68
/*
 * Program the transfer-size field (low 11 bits of desc1) of a DMA
 * descriptor, leaving the other desc1 bits untouched.
 *
 * NOTE(review): the paddr parameter is currently unused -- the buffer
 * address (desc2) is set once by desc_init_chain() and callers pass 0
 * here; confirm this is intentional before relying on paddr.
 */
void desc_buf_set(struct dma_desc *desc, unsigned long paddr, int size)
{
    desc->desc1.all &= (~((1 << 11) - 1));       /* clear size field */
    desc->desc1.all |= (size & ((1 << 11) - 1)); /* write new size (max 2047) */
}
74
desc_set_own(struct dma_desc * desc)75 void desc_set_own(struct dma_desc *desc)
76 {
77 desc->desc0.all |= 0x80000000;
78 }
79
desc_tx_close(struct dma_desc * first,struct dma_desc * end,int csum_insert)80 void desc_tx_close(struct dma_desc *first, struct dma_desc *end, int csum_insert)
81 {
82 struct dma_desc *desc = first;
83
84 first->desc1.tx.first_sg = 1;
85 end->desc1.tx.last_seg = 1;
86 end->desc1.tx.interrupt = 1;
87
88 if (csum_insert)
89 {
90 do
91 {
92 desc->desc1.tx.cic = 3;
93 desc++;
94 } while (desc <= end);
95
96 }
97 }
98
desc_get_own(struct dma_desc * desc)99 int desc_get_own(struct dma_desc *desc)
100 {
101 return desc->desc0.all & 0x80000000;
102 }
103
/* Clear the software "in flight" tag kept in the descriptor's first
 * reserved word (consumed by the TX reclaim/wait paths). */
void desc_tag_clean(struct dma_desc *desc)
{
    desc->resever0 = 0;
}
108
/* Set the software "in flight" tag in the descriptor's first reserved
 * word, marking it as submitted but not yet reclaimed. */
void desc_add_tag(struct dma_desc *desc)
{
    desc->resever0 = 1;
}
113
geth_phy_read(char * devname,unsigned int phy_addr,unsigned char reg,unsigned short * data)114 static int geth_phy_read(char *devname, unsigned int phy_addr, unsigned char reg,unsigned short *data)
115 {
116 struct eth_device *dev;
117 uint32_t value;
118
119 value = geth_mdio_read(rt_geth_dev.iobase, phy_addr, reg);
120
121 return value;
122 }
123
/*
 * Write a 16-bit PHY register over MDIO.
 *
 * devname is part of the legacy callback signature and is unused.
 * Always returns 0 (the underlying HAL write reports no status).
 */
static int geth_phy_write(char *devname, unsigned int phy_addr, unsigned char reg, uint16_t data)
{
    geth_mdio_write(rt_geth_dev.iobase, phy_addr, reg, data);

    return 0;
}
132
/*
 * Propagate a PHY link state change: log it, notify the RT-Thread
 * ethernet layer, and cache the new state in the device structure.
 */
void geth_link_change(struct geth_device *dev, rt_bool_t up)
{
    rt_bool_t state = up ? RT_TRUE : RT_FALSE;

    printf(up ? "link up\n" : "link down\n");
    eth_device_linkchange(&dev->parent, state);
    dev->phy_link_status = state;
}
148
/*
 * Copy one received frame from the current RX DMA descriptor into the
 * pbuf chain p.
 *
 * On success returns 0 with the frame length in *read_length; the
 * descriptor is handed back to the DMA engine and the rx_clean cursor
 * advances.  Returns -1 (with *read_length = 0) when no frame is ready
 * or the pbuf chain is too small for the frame.
 */
int read_data_from_eth(rt_device_t dev, struct pbuf *p, uint16_t *read_length)
{
    int ret = -1;
    hal_geth_dma_desc_t *rx_p = NULL;
    struct pbuf *q = NULL;
    int length = 0;
    int offset = 0;

    rx_p = rt_geth_dev.get_buffer_config.dma_desc_rx + rx_clean;

    /* Invalidate cached copies so the DMA engine's writes are visible. */
    awos_arch_mems_flush_dcache_region((unsigned long)rx_p, sizeof(hal_geth_dma_desc_t));
    awos_arch_mems_flush_dcache_region((unsigned long)rx_p->desc2, 2048);
    dsb(v);
    if (!rx_p->desc0.rx.own)
    {
        length = rx_p->desc0.rx.frm_len;
        /* Scatter the frame across the (possibly chained) pbufs. */
        for (q = p; q != RT_NULL; q = q->next)
        {
            if ((length - PBUF_MAX_BUFF_SIZE) <= 0)
            {
                rt_memcpy(q->payload, (void *)((unsigned long)rx_p->desc2 + offset), length);
                offset += length;
                break;
            }
            else
            {
                rt_memcpy(q->payload, (void *)((unsigned long)rx_p->desc2 + offset), PBUF_MAX_BUFF_SIZE);
                offset += PBUF_MAX_BUFF_SIZE;
                length -= PBUF_MAX_BUFF_SIZE;
            }
        }

        if (offset != (uint16_t)(rx_p->desc0.rx.frm_len))
        {
            *read_length = 0;
            ret = -1;
            printf("have not enough pbuf for receive data offset %d length %d\n", offset, length);
        }
        else
        {
            *read_length = offset;
            ret = 0;
        }
        /* Return the descriptor to the DMA engine and flush it to RAM. */
        rx_p->desc0.all = 0x80000000;

        awos_arch_mems_clean_dcache_region((unsigned long)rx_p, sizeof(hal_geth_dma_desc_t));
        rx_clean = circ_inc(rx_clean, DMA_DESC_RX_NUM);
    }
    else
    {
        /* BUG FIX: was `read_length = 0;`, which nulled the local pointer
         * instead of clearing the caller's length. */
        *read_length = 0;
        return -1;
    }

    return ret;
}
/*
 * Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the persistent tx_clean cursor and, for every
 * descriptor that (a) has been released by the DMA engine (own bit
 * clear) and (b) still carries the software in-flight tag (resever0),
 * clears the tag and writes the descriptor back to memory.  Stops at
 * the first descriptor that is still owned by DMA or untagged.
 *
 * NOTE(review): this static tx_clean is independent of the static
 * tx_clean inside rt_geth_xmit -- confirm the two cursors are meant to
 * be separate.  Always returns 0.
 */
int tx_desc_recycle(rt_device_t dev)
{
    struct dma_desc *desc;
    static int tx_clean = 0;
    desc = rt_geth_dev.get_buffer_config.dma_desc_tx + tx_clean;

    /* Invalidate so own/tag bits are read from RAM, not a stale cache line. */
    awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
    dsb(v);
    while((!desc_get_own(desc))&&(desc->resever0))
    {
        desc_tag_clean(desc);
        dsb(v);
        /* Push the cleared tag back to RAM before moving on. */
        awos_arch_mems_clean_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));

        tx_clean = circ_inc(tx_clean, DMA_DESC_TX_NUM);
        desc = rt_geth_dev.get_buffer_config.dma_desc_tx + tx_clean;
        dsb(v);
        /* Refresh the next descriptor from RAM before testing it. */
        awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
        dsb(v);
    }
    return 0;
}
229
geth_irq_handler(int irq,void * dev_id)230 static irqreturn_t geth_irq_handler(int irq, void *dev_id)
231 {
232 int int_sta_value;
233 int_sta_value = hal_readl(rt_geth_dev.iobase + GETH_INT_STA);
234 int length;
235 int ret = 0;
236
237 if(RX_INT & int_sta_value)
238 {
239 hal_writel(RX_INT,rt_geth_dev.iobase + GETH_INT_STA);
240 geth_rx_int_disable(rt_geth_dev.iobase);
241 eth_device_ready(&(rt_geth_dev.parent));
242
243 }
244 if(TX_INT & int_sta_value)
245 {
246 hal_writel(RX_INT,rt_geth_dev.iobase + GETH_INT_STA);
247 }
248
249 /*clear all interrupt status*/
250 hal_writel(int_sta_value,rt_geth_dev.iobase + GETH_INT_STA);
251
252 return 0;
253 }
254
is_enough_desc_available(struct dma_desc * entry)255 int is_enough_desc_available(struct dma_desc *entry)
256 {
257 struct pbuf *q = RT_NULL;
258 struct dma_desc *desc;
259 desc = entry;
260 awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
261 dsb(v);
262 if(desc->desc0.tx.own != 0)
263 {
264 printf("desc %08x desc0 %08x desc1 %08x\n",desc,desc->desc0,desc->desc1);
265 return -RT_ERROR;
266 }
267
268 return RT_EOK;
269 }
270
wait_tx_completed(struct dma_desc * entry)271 int wait_tx_completed(struct dma_desc *entry)
272 {
273 struct pbuf *q = RT_NULL;
274 struct dma_desc *desc;
275 unsigned int timeout_cnt = 0;
276 desc = entry;
277 awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
278 dsb(v);
279 while(desc_get_own(desc))
280 {
281 awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
282 dsb(v);
283 timeout_cnt++;
284 if(timeout_cnt > 1000)
285 {
286 printf("emac send data timeout \n");
287 return -RT_ERROR;
288 }
289 }
290 desc_tag_clean(desc);
291 dsb(v);
292 awos_arch_mems_clean_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
293
294 return RT_EOK;
295 }
296
rt_geth_xmit(rt_device_t dev,struct pbuf * p)297 static rt_err_t rt_geth_xmit(rt_device_t dev, struct pbuf *p)
298 {
299 unsigned int entry;
300 struct pbuf *q = RT_NULL;
301 struct dma_desc *first, *paddr ,*end;
302 static int tx_dirty = 0;
303 static int tx_clean = 0;
304 int desc_value = 0;
305 void *dist = NULL;
306 int ret = 0;
307 unsigned int i = 0;
308 unsigned int copy_offset = 0;
309
310 if (!rt_geth_dev.phy_link_status) return -RT_ERROR;
311
312 first = rt_geth_dev.get_buffer_config.dma_desc_tx + tx_dirty;
313 ret = is_enough_desc_available(first);
314 if(ret < 0)
315 {
316 return -RT_ERROR;
317 }
318
319 copy_offset = 0;
320 for(q = p;q != RT_NULL;q=q->next)
321 {
322 dist = (void *)((unsigned long)first->desc2);
323 rt_memcpy(dist+copy_offset,q->payload,q->len);
324 copy_offset += q->len;
325
326 if(copy_offset >= ((1 << 11) - 1))
327 {
328 printf("send data exceed max len copy_offset %d\n",copy_offset);
329 return -RT_ERROR;
330 }
331 }
332
333 desc_buf_set(first, 0, copy_offset);
334 desc_add_tag(first);
335 tx_dirty = circ_inc(tx_dirty, DMA_DESC_TX_NUM);
336 end = first;
337 desc_set_own(first);
338 desc_tx_close(first, end, 0);
339
340 dsb(v);
341 awos_arch_mems_clean_dcache_region((unsigned long)first,sizeof(hal_geth_dma_desc_t));
342 awos_arch_mems_clean_dcache_region((unsigned long)first->desc2,copy_offset);
343 dsb(v);
344
345 /* Enable transmit and Poll transmit */
346 geth_tx_poll(rt_geth_dev.iobase);
347 ret = wait_tx_completed(first);
348
349 return ret;
350 }
351
/*
 * lwIP receive hook: return one pending frame as a pbuf chain, or NULL
 * when nothing is ready (in which case RX interrupts are re-armed).
 *
 * The PBUF_POOL chain is allocated lazily and cached in p_s across
 * calls, so an allocation failure on one poll can be retried later
 * without dropping the descriptor.
 */
static struct pbuf *rt_geth_recv(rt_device_t dev)
{
    static struct pbuf *p_s = RT_NULL;
    struct pbuf *p = RT_NULL;
    int status;
    uint16_t length = 0;

    if (p_s == RT_NULL)
    {
        p_s = pbuf_alloc(PBUF_RAW, ENET_FRAME_MAX_FRAMELEN, PBUF_POOL);
        if (p_s == RT_NULL)
        {
            return RT_NULL;
        }
    }
    p = p_s;

    status = read_data_from_eth(dev, p, &length);
    if (status == -1)
    {
        /* Nothing received: re-enable RX interrupts and wait. */
        geth_rx_int_enable(rt_geth_dev.iobase);
        return NULL;
    }

    /* Shrink the chain to the actual frame length and hand it to lwIP. */
    pbuf_realloc(p, length);
    p_s = RT_NULL;
    return p;
}
385
/*
 * Classify a received descriptor: good_frame when it is a complete,
 * error-free frame; discard_frame when it is not the last descriptor of
 * a frame, carries a checksum/IP-header error on an ethernet frame, or
 * reports an error summary, length error, or MII error.
 */
static int rx_status(hal_geth_dma_desc_t *p)
{
    if (p->desc0.rx.last_desc == 0)
        return discard_frame;

    if (p->desc0.rx.frm_type && (p->desc0.rx.chsum_err || p->desc0.rx.ipch_err))
        return discard_frame;

    if (p->desc0.rx.err_sum || p->desc0.rx.len_err || p->desc0.rx.mii_err)
        return discard_frame;

    return good_frame;
}
408
/* Program addr into the controller's MAC address slot 0. */
static void geth_set_hwaddr(unsigned char *addr)
{
    geth_set_mac_addr(rt_geth_dev.iobase, addr, 0);
}
413
/* Stub: reading the MAC address back from hardware is not implemented. */
static void geth_get_hwaddr(struct eth_device *dev)
{
}
417
418 /********************************************************
419 RGMII RMI D1 F133
420 ---------------------------------------------------------
421 RXD3 / PE14 PG9 / PG9
422 RXD2 / PE13 PG8 PE13 PG8
423 RXD1 RXD1 PE2 PG2 PE2 PG2
424 RXD0 RXD0 PE1 PG1 PE1 PG1
425 RXCK / PE15 PG10 / PG10
426 RXCTRL CRS-DV PE0 PG0 PE0 PG0
427 TXD3 / PE12 PG7 PE12 PG7
428 TXD2 / PE11 PG6 PE11 PG6
429 TXD1 TXD1 PE5 PG5 PE5 PG5
430 TXD0 TXD0 PE4 PG4 PE4 PG4
431 TXCK TXCK PE3 PG3 PE3 PG3
432 TXCTL TXEN PE6 PG12 PE6 PG12
433 CLKIN RXER PE7 PG13 PE7 PG13
434 MDC MDC PE8 PG14 PE8 PG14
435 MDIO MDIO PE9 PG15 PE9 PG15
436 EPHY-25M EPHY-25M PE10 PG11 PE10 PG11
437 */
/*
 * Route the GMAC signals to the pads selected by the board config (see
 * the RGMII/RMII pin table above).  PE pads use pinmux function 8, PG
 * pads use function 4; the extra RGMII data/clock lines are only set up
 * when GMAC_USING_RGMII is enabled.
 */
static void geth_pinctrl_init(void)
{
#ifdef GMAC_USING_GPIOE
    hal_gpio_pinmux_set_function(GPIO_PE0, 8);
    hal_gpio_pinmux_set_function(GPIO_PE1, 8);
    hal_gpio_pinmux_set_function(GPIO_PE2, 8);
    hal_gpio_pinmux_set_function(GPIO_PE3, 8);
    hal_gpio_pinmux_set_function(GPIO_PE4, 8);
    hal_gpio_pinmux_set_function(GPIO_PE5, 8);
    hal_gpio_pinmux_set_function(GPIO_PE6, 8);
    hal_gpio_pinmux_set_function(GPIO_PE7, 8);
    hal_gpio_pinmux_set_function(GPIO_PE8, 8);
    hal_gpio_pinmux_set_function(GPIO_PE9, 8);
    hal_gpio_pinmux_set_function(GPIO_PE10, 8);
#ifdef GMAC_USING_RGMII
    hal_gpio_pinmux_set_function(GPIO_PE11, 8);
    hal_gpio_pinmux_set_function(GPIO_PE12, 8);
    hal_gpio_pinmux_set_function(GPIO_PE13, 8);
#ifdef BOARD_allwinnerd1
    hal_gpio_pinmux_set_function(GPIO_PE14, 8);
    hal_gpio_pinmux_set_function(GPIO_PE15, 8);
#endif /* BOARD_allwinnerd1 */

#ifdef BOARD_allwinnerd1s
    hal_gpio_pinmux_set_function(GPIO_PG9, 4);
    hal_gpio_pinmux_set_function(GPIO_PG10, 4);
#endif /* BOARD_allwinnerd1s */

#endif /* GMAC_USING_RGMII */
#endif /* GMAC_USING_GPIOE */

#ifdef GMAC_USING_GPIOG
    hal_gpio_pinmux_set_function(GPIO_PG0, 4);
    hal_gpio_pinmux_set_function(GPIO_PG1, 4);
    hal_gpio_pinmux_set_function(GPIO_PG2, 4);
    hal_gpio_pinmux_set_function(GPIO_PG3, 4);
    hal_gpio_pinmux_set_function(GPIO_PG4, 4);
    hal_gpio_pinmux_set_function(GPIO_PG5, 4);
#ifdef GMAC_USING_RGMII
    hal_gpio_pinmux_set_function(GPIO_PG6, 4);
    hal_gpio_pinmux_set_function(GPIO_PG7, 4);
    hal_gpio_pinmux_set_function(GPIO_PG8, 4);
    hal_gpio_pinmux_set_function(GPIO_PG9, 4);
    hal_gpio_pinmux_set_function(GPIO_PG10, 4);
#endif /* GMAC_USING_RGMII */
    hal_gpio_pinmux_set_function(GPIO_PG11, 4);
    hal_gpio_pinmux_set_function(GPIO_PG12, 4);
    hal_gpio_pinmux_set_function(GPIO_PG13, 4);
    /* BUG FIX: per the pin table above, MDC/MDIO are PG14/PG15 in the
     * PG mapping; the original configured PE14/PE15 here, leaving the
     * MDIO bus pins unmuxed. */
    hal_gpio_pinmux_set_function(GPIO_PG14, 4);
    hal_gpio_pinmux_set_function(GPIO_PG15, 4);
#endif /* GMAC_USING_GPIOG */
}
490
geth_phy_init(struct eth_device * dev)491 static int geth_phy_init(struct eth_device *dev)
492 {
493 uint32_t value;
494 uint16_t phy_val;
495 int i;
496 uint32_t phy_addr = 0x1f;
497 int duplex;
498 int speed;
499 uint16_t temp = 0;
500
501 for (i = 0; i < 0x1f; i++) {
502 value = (geth_phy_read((char *)dev, i, MII_PHYSID1,NULL)
503 & 0xffff) << 16;
504 value |= (geth_phy_read((char *)dev, i, MII_PHYSID2,NULL) & 0xffff);
505
506 if ((value & 0x1fffffff) == 0x1fffffff) {
507 rt_thread_delay(RT_TICK_PER_SECOND/100);
508 continue;
509 }
510 phy_addr = i;
511 break;
512 }
513
514 phy_addr = i;
515 if (phy_addr == 0x1f) {
516 printf("No PHY device!\n");
517 return -1;
518 }
519 phy_val = geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL);
520 geth_phy_write((char *)dev, phy_addr, MII_BMCR, phy_val | BMCR_RESET);
521 while (geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL) & BMCR_RESET);
522
523 phy_val = geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL);
524 geth_phy_write((char *)dev, phy_addr, MII_BMCR, phy_val | BMCR_FULLDPLX);
525
526 /* Reset phy chip */
527 phy_val = geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL);
528 geth_phy_write((char *)dev, phy_addr, MII_BMCR, (phy_val & ~BMCR_PDOWN));
529 while (geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL) & BMCR_PDOWN);
530
531 /* Wait BMSR_ANEGCOMPLETE be set */
532 while (!(geth_phy_read((char *)dev, phy_addr, MII_BMSR,NULL) & BMSR_ANEGCOMPLETE)) {
533 if (i > 40) {
534 printf("Warning: Auto negotiation timeout!\n");
535 return -1;
536 }
537 rt_thread_delay(RT_TICK_PER_SECOND/2);
538 i++;
539 }
540
541 phy_val = geth_phy_read((char *)dev, phy_addr, MII_RESV2,NULL);
542 temp = (phy_val>>4) & 0x3;
543 switch(temp)
544 {
545 case 2:
546 speed = 1000;
547 break;
548 case 1:
549 speed = 100;
550 break;
551 case 0:
552 speed = 10;
553 break;
554 default :
555 break;
556 }
557 temp = phy_val & 0x08;
558 duplex = (temp) ? 1 : 0;
559 geth_set_link_mode(rt_geth_dev.iobase,duplex,speed);
560
561 return 0;
562 }
563
/*
 * Allocate the DMA data buffers and descriptor rings for RX and TX and
 * record the matching physical addresses for the DMA engine.
 *
 * Under RT_USING_SMART whole pages come from the page allocator;
 * otherwise plain rt_malloc() is used.
 *
 * Returns 0 on success, -1 on any allocation failure.
 *
 * NOTE(review): on a failure part-way through, earlier allocations are
 * not released -- acceptable only if driver init failure is fatal to
 * the system; confirm.
 */
static int geth_dma_desc_init(void)
{
    void *temp = RT_NULL;   /* kept for the disabled ioremap variants below */

#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.rx_buff_addr = rt_pages_alloc(RX_BUFFER_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.rx_buff_addr = rt_malloc(DMA_MEM_ALIGN_SIZE * DMA_DESC_RX_NUM);
#endif
    if(!rt_geth_dev.get_buffer_config.rx_buff_addr)
    {
        printf("ERROR: rx buff page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.rx_buff_addr), (SYS_PAGE_SIZE<<RX_BUFFER_INDEX_NUM));
    /* Physical address of the RX data buffers, for the descriptors. */
    rt_geth_dev.get_buffer_config.phy_rx_buff_addr = (void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.rx_buff_addr);
#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.tx_buff_addr = rt_pages_alloc(TX_BUFFER_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.tx_buff_addr = rt_malloc(DMA_MEM_ALIGN_SIZE * DMA_DESC_TX_NUM);
#endif
    if(!rt_geth_dev.get_buffer_config.tx_buff_addr)
    {
        printf("ERROR: tx buff page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.tx_buff_addr), (SYS_PAGE_SIZE<<TX_BUFFER_INDEX_NUM));
    /* Physical address of the TX data buffers, for the descriptors. */
    rt_geth_dev.get_buffer_config.phy_tx_buff_addr = (void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.tx_buff_addr);

#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.dma_desc_rx = (hal_geth_dma_desc_t *)rt_pages_alloc(RX_BD_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.dma_desc_rx = (hal_geth_dma_desc_t *)rt_malloc(sizeof(hal_geth_dma_desc_t) * DMA_DESC_RX_NUM);
#endif
    if(!rt_geth_dev.get_buffer_config.dma_desc_rx)
    {
        printf("ERROR: rx bd page alloc failed\n");
        return -1;
    }

    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx), (SYS_PAGE_SIZE<<RX_BD_INDEX_NUM));
    /* Physical address of the RX descriptor ring, for chaining (desc3). */
    rt_geth_dev.get_buffer_config.phy_dma_desc_rx = (hal_geth_dma_desc_t *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx);
#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.dma_desc_tx = (hal_geth_dma_desc_t *)rt_pages_alloc(TX_BD_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.dma_desc_tx = (hal_geth_dma_desc_t *)rt_malloc(sizeof(hal_geth_dma_desc_t) * DMA_DESC_TX_NUM);
#endif
    if(!rt_geth_dev.get_buffer_config.dma_desc_tx)
    {
        printf("ERROR: tx bd page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx), (SYS_PAGE_SIZE<<TX_BD_INDEX_NUM));
    /* Physical address of the TX descriptor ring, for chaining (desc3). */
    rt_geth_dev.get_buffer_config.phy_dma_desc_tx = (hal_geth_dma_desc_t *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx);

    return 0;
}
621
/*
 * Build a chained descriptor ring of `size` entries.
 *
 * desc             - virtual address of the descriptor array (written here)
 * addr             - physical address of the same array (used for links)
 * first_buff_addr  - physical address of the first data buffer
 * align_size       - stride between consecutive data buffers
 *
 * Each entry's desc2 points at its data buffer and desc3 at the next
 * descriptor; the last entry's desc3 wraps back to the head.
 */
void desc_init_chain(hal_geth_dma_desc_t *desc, unsigned long addr, unsigned long first_buff_addr,unsigned int size,unsigned int align_size)
{
    /* In chained mode the desc3 points to the next element in the ring.
     * The latest element has to point to the head.
     */
    int i;
    hal_geth_dma_desc_t *p = desc;
    unsigned long dma_desc_phy = addr;
    unsigned long dma_buff_phy = first_buff_addr;

    for (i = 0; i < size; i++) {

        if(i == (size - 1))
        {
            /* last entry: close the ring back to the head */
            p->desc2 = (u32)dma_buff_phy;
            p->desc3 = (u32)addr;
        }
        else
        {
            /* link to the physically-next descriptor */
            dma_desc_phy += sizeof(hal_geth_dma_desc_t);
            p->desc2 = (u32)dma_buff_phy;
            p->desc3 = (u32)dma_desc_phy;
            p++;
        }
        dma_buff_phy += align_size;
    }
}
649
/*
 * Bring up the GMAC controller: clocks, pinmux, MAC soft reset and
 * configuration, DMA buffer/descriptor allocation and ring setup, then
 * start TX/RX and install the interrupt handler.
 *
 * Returns 0 on success, -1 when DMA setup or IRQ registration fails.
 */
static rt_err_t rt_geth_init(rt_device_t dev)
{
    uint32_t value;

    /* Enable clock */

    uint32_t used_type = rt_geth_dev.used_type;
    uint32_t tx_delay = rt_geth_dev.tx_delay;
    uint32_t rx_delay = rt_geth_dev.rx_delay;
    uint32_t phy_interface = rt_geth_dev.phy_interface;
    geth_clk_enable(used_type,phy_interface,tx_delay,rx_delay);

    /* Pinctrl init */
    geth_pinctrl_init();
    /* MAC controller soft reset */
    value = geth_mac_reset(rt_geth_dev.iobase);
    if (!value)
    {
        printf("Gmac controller softs reset success\n");
    }
    else
    {
        printf("Gmac controller soft reset failed value %08x\n",value);
    }
    /* MAC controller initialize */
    geth_mac_init(rt_geth_dev.iobase);

    geth_set_hwaddr(rt_geth_dev.dev_addr);
    /* Frame filter */
    geth_set_filter(rt_geth_dev.iobase);
    /* Burst should be 8 */
    value = hal_readl(rt_geth_dev.iobase + GETH_BASIC_CTL1);
    value |= (8 << 24);
    hal_writel(value, rt_geth_dev.iobase + GETH_BASIC_CTL1);

    /* Disable all interrupt of dma */
    geth_all_int_disable(rt_geth_dev.iobase);

    value = geth_dma_desc_init();
    if(value < 0) {
        printf("Gmac dma desc init fail!\n");
        return -1;
    }

    memset((void *)rt_geth_dev.get_buffer_config.dma_desc_tx, 0, sizeof(hal_geth_dma_desc_t)*DMA_DESC_TX_NUM);
    memset((void *)rt_geth_dev.get_buffer_config.dma_desc_rx, 0, sizeof(hal_geth_dma_desc_t)*DMA_DESC_RX_NUM);

    /* Chain both rings using the physical addresses recorded above. */
    desc_init_chain(rt_geth_dev.get_buffer_config.dma_desc_tx, (unsigned long)rt_geth_dev.get_buffer_config.phy_dma_desc_tx,
            (unsigned long)rt_geth_dev.get_buffer_config.phy_tx_buff_addr, DMA_DESC_TX_NUM, DMA_MEM_ALIGN_SIZE);
    desc_init_chain(rt_geth_dev.get_buffer_config.dma_desc_rx, (unsigned long)rt_geth_dev.get_buffer_config.phy_dma_desc_rx,
            (unsigned long)rt_geth_dev.get_buffer_config.phy_rx_buff_addr, DMA_DESC_RX_NUM, DMA_MEM_ALIGN_SIZE);

    gmac_rx_desc_fill(NULL);

    /* NOTE(review): the ring base registers are written with the VIRTUAL
     * descriptor addresses while the chain links use PHYSICAL ones --
     * correct only when the two are identical (flat mapping); confirm. */
    hal_writel((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx, rt_geth_dev.iobase + GETH_TX_DESC_LIST);
    hal_writel((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx, rt_geth_dev.iobase + GETH_RX_DESC_LIST);

    /* Flush the initialized rings to RAM before the DMA engine starts. */
    awos_arch_mems_clean_dcache_region((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx, sizeof(hal_geth_dma_desc_t)*DMA_DESC_TX_NUM);
    awos_arch_mems_clean_dcache_region((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx, sizeof(hal_geth_dma_desc_t)*DMA_DESC_RX_NUM);
    dsb(v);
    /* start tx & rx */
    geth_start_tx(rt_geth_dev.iobase);
    geth_start_rx(rt_geth_dev.iobase);

    /* Enable transmit & receive */
    geth_mac_enable(rt_geth_dev.iobase);

    if (request_irq(GETH_IRQ_NUM, geth_irq_handler, 0, "geth", (void *)&rt_geth_dev) < 0)
    {
        printf("request irq error\n");
        return -1;
    }

    enable_irq(GETH_IRQ_NUM);
    geth_rx_int_enable(rt_geth_dev.iobase);
    return 0;
}
727
/* Shut the controller down: disable the MAC, then gate its clocks.
 * NOTE(review): the IRQ and the DMA buffers/rings are not released. */
static void rt_geth_uninitialize(rt_device_t dev)
{
    geth_mac_disable(rt_geth_dev.iobase);
    geth_clk_disable();
}
733
/* Device open hook: nothing to do, the MAC is started in rt_geth_init. */
static rt_err_t rt_geth_open(rt_device_t dev, rt_uint16_t oflag)
{
    printf("gmac open\n");
    return RT_EOK;
}
739
/* Device close hook: nothing to tear down here. */
static rt_err_t rt_geth_close(rt_device_t dev)
{
    printf("gmac close\n");
    return RT_EOK;
}
745
/* Stream read is not meaningful for an ethernet device: report ENOSYS
 * and return 0 bytes. Frames are delivered through the eth_rx hook. */
static rt_ssize_t rt_geth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    printf("gmac read\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
752
/* Stream write is not meaningful for an ethernet device: report ENOSYS
 * and return 0 bytes. Frames are sent through the eth_tx hook. */
static rt_ssize_t rt_geth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    printf("gmac write\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
759
rt_geth_control(rt_device_t dev,int cmd,void * args)760 static rt_err_t rt_geth_control(rt_device_t dev, int cmd, void *args)
761 {
762 switch(cmd)
763 {
764 case NIOCTL_GADDR:
765 /* get mac address */
766 if(args) {
767 rt_memcpy(args, rt_geth_dev.dev_addr, 6);
768 } else {
769 return -RT_ERROR;
770 }
771 break;
772
773 default :
774 break;
775 }
776
777 return RT_EOK;
778 }
is_data_availabl()779 int is_data_availabl()
780 {
781
782 hal_geth_dma_desc_t *rx_p = NULL;
783 rx_p = rt_geth_dev.get_buffer_config.dma_desc_rx + rx_clean;
784
785 awos_arch_mems_flush_dcache_region((unsigned long)rx_p,sizeof(hal_geth_dma_desc_t));
786 awos_arch_mems_flush_dcache_region((unsigned long)rx_p->desc2,2048);
787 dsb(v);
788
789 if(!rx_p->desc0.rx.own)
790 {
791 if((rx_p->desc0.rx.last_desc != 1)||(rx_p->desc0.rx.first_desc != 1))
792 {
793 printf("first %d last %d\n",rx_p->desc0.rx.first_desc,rx_p->desc0.rx.last_desc);
794 }
795 return 0;
796 }
797 else
798 {
799 return -1;
800 }
801
802 return 0;
803 }
804
phy_link_detect(void * param)805 static void phy_link_detect(void *param)
806 {
807
808 uint16_t bmsr = 0;
809 uint16_t link_status = 0;
810 uint16_t link_status_old = 0;
811 uint16_t phy_val;
812 int ret = -1;
813
814 while(1)
815 {
816 bmsr = geth_phy_read(NULL, 0, MII_BMSR,NULL);
817 link_status = bmsr & BMSR_LSTATUS;
818 if(link_status_old != link_status)
819 {
820 if(link_status)
821 {
822 ret = geth_phy_init((struct eth_device *)param);
823 if(ret == 0)
824 {
825 geth_link_change(&rt_geth_dev,1);
826 }
827 }
828 else
829 {
830 if(link_status_old != link_status)
831 {
832 geth_link_change(&rt_geth_dev,0);
833 }
834 }
835
836 }
837 link_status_old = link_status;
838 rt_thread_delay(RT_TICK_PER_SECOND);
839 }
840
841 }
/*
 * Populate the driver structure (base address, RGMII interface, TX/RX
 * clock delays, random MAC), register the device with the RT-Thread
 * ethernet layer as "e0", and start the link-monitor thread.
 */
void rt_geth_driver_init(void)
{
    rt_err_t state = RT_EOK;

    rt_geth_dev.iobase = IOBASE;
    rt_geth_dev.phy_interface = PHY_INTERFACE_MODE_RGMII;
    rt_geth_dev.used_type = EXT_PHY;
    rt_geth_dev.tx_delay = 3;   /* board-tuned RGMII clock delays */
    rt_geth_dev.rx_delay = 0;

    /* No MAC in storage: generate a random locally-administered address. */
    random_ether_addr(rt_geth_dev.dev_addr);
    rt_geth_dev.parent.parent.init = rt_geth_init;
    rt_geth_dev.parent.parent.open = rt_geth_open;
    rt_geth_dev.parent.parent.close = rt_geth_close;
    rt_geth_dev.parent.parent.read = rt_geth_read;
    rt_geth_dev.parent.parent.write = rt_geth_write;
    rt_geth_dev.parent.parent.control = rt_geth_control;
    rt_geth_dev.parent.parent.user_data = RT_NULL;

    rt_geth_dev.parent.eth_rx = rt_geth_recv;
    rt_geth_dev.parent.eth_tx = rt_geth_xmit;
    /* register eth device */
    state = eth_device_init(&(rt_geth_dev.parent), "e0");
    if (RT_EOK == state) {
        printf("gmac device init success\n");
    } else {
        printf("gmac device init failed: %d\n", state);
    }

    /* Background thread that polls the PHY for link changes. */
    rt_thread_t link_detect;
    link_detect = rt_thread_create("link_detect",
            phy_link_detect,
            (void *)&rt_geth_dev,
            4096,
            13,
            2);
    if (link_detect != RT_NULL)
    {
        rt_thread_startup(link_detect);
    }

    return;
}
885
/* Boot-time entry point, run automatically via INIT_DEVICE_EXPORT.
 * NOTE(review): "drvier" is a typo for "driver", but the name is
 * exported by the init macro, so renaming would need a coordinated
 * change. */
static int rt_hw_eth_drvier_init(void)
{
    rt_geth_driver_init();

    return 0;
}
INIT_DEVICE_EXPORT(rt_hw_eth_drvier_init);
893