1 /*
2 * Copyright (c) 2021-2024 HPMicro
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2022-01-11 HPMicro First version
9 * 2022-07-10 HPMicro Driver optimization for multiple instances
10 */
11
12 #include <rtdevice.h>
13
14 #ifdef BSP_USING_ETH
15 #include <rtdbg.h>
16 #include "drv_enet.h"
17 #include "hpm_otp_drv.h"
18
#ifdef BSP_USING_ETH0

/* ---- ENET0 DMA descriptor rings and frame buffers ----
 * Descriptor rings are placed in non-cacheable memory so the CPU and the
 * ENET DMA engine observe coherent descriptor state; frame buffers live in
 * ".fast_ram". Both carry the alignment the ENET peripheral requires. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t enet0_dma_rx_desc_tab[ENET0_RX_BUFF_COUNT]; /* Ethernet0 Rx DMA Descriptor */

ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t enet0_dma_tx_desc_tab[ENET0_TX_BUFF_COUNT]; /* Ethernet0 Tx DMA Descriptor */

ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet0_rx_buff[ENET0_RX_BUFF_COUNT][ENET0_RX_BUFF_SIZE]; /* Ethernet0 Receive Buffer */

ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet0_tx_buff[ENET0_TX_BUFF_COUNT][ENET0_TX_BUFF_SIZE]; /* Ethernet0 Transmit Buffer */

/* RT-Thread device object and driver state for ENET0 */
struct eth_device eth0_dev;
static enet_device enet0_dev;
static enet_buff_config_t enet0_rx_buff_cfg = {.buffer = (uint32_t)enet0_rx_buff,
                                               .count = ENET0_RX_BUFF_COUNT,
                                               .size = ENET0_RX_BUFF_SIZE
                                              };

static enet_buff_config_t enet0_tx_buff_cfg = {.buffer = (uint32_t)enet0_tx_buff,
                                               .count = ENET0_TX_BUFF_COUNT,
                                               .size = ENET0_TX_BUFF_SIZE
                                              };

#if __USE_ENET_PTP
static enet_ptp_ts_update_t ptp_timestamp0 = {0, 0};
static enet_ptp_config_t ptp_config0 = {.timestamp_rollover_mode = enet_ts_dig_rollover_control,
                                        .update_method = enet_ptp_time_fine_update,
                                        .addend = 0xffffffff,
                                       };
#endif

/* Aggregate descriptor for ENET0: everything rt_hw_eth_init() needs to
 * register this instance. .inf selects the media interface
 * (0 -> RMII, non-zero -> RGMII, see rt_hw_eth_init()). */
static hpm_enet_t enet0 = {.name = "E0",
                           .base = HPM_ENET0,
                           .irq_num = IRQn_ENET0,
                           .inf = BOARD_ENET0_INF,
                           .eth_dev = &eth0_dev,
                           .enet_dev = &enet0_dev,
                           .rx_buff_cfg = &enet0_rx_buff_cfg,
                           .tx_buff_cfg = &enet0_tx_buff_cfg,
                           .dma_rx_desc_tab = enet0_dma_rx_desc_tab,
                           .dma_tx_desc_tab = enet0_dma_tx_desc_tab,
#if !BOARD_ENET0_INF
                           .int_refclk = BOARD_ENET0_INT_REF_CLK,
#else
                           .tx_delay = BOARD_ENET0_TX_DLY,
                           .rx_delay = BOARD_ENET0_RX_DLY,
#endif

#if __USE_ENET_PTP
                           .ptp_clk_src = BOARD_ENET0_PTP_CLOCK,
                           .ptp_config = &ptp_config0,
                           .ptp_timestamp = &ptp_timestamp0
#endif
                          };
#endif
77
/* Fallback MAC addresses built from board macros; used by
 * enet_get_mac_address() when the OTP area holds no valid address.
 * Index 0 -> ENET0, index 1 -> ENET1. */
mac_init_t mac_init[] = {
    {MAC0_ADDR0, MAC0_ADDR1, MAC0_ADDR2, MAC0_ADDR3, MAC0_ADDR4, MAC0_ADDR5},
    {MAC1_ADDR0, MAC1_ADDR1, MAC1_ADDR2, MAC1_ADDR3, MAC1_ADDR4, MAC1_ADDR5}
};
82
#ifdef BSP_USING_ETH1

/* ---- ENET1 DMA descriptor rings and frame buffers ----
 * Mirrors the ENET0 layout above: non-cacheable descriptor rings,
 * ".fast_ram" frame buffers, peripheral-mandated alignment. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t enet1_dma_rx_desc_tab[ENET1_RX_BUFF_COUNT]; /* Ethernet1 Rx DMA Descriptor */

ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t enet1_dma_tx_desc_tab[ENET1_TX_BUFF_COUNT]; /* Ethernet1 Tx DMA Descriptor */

ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet1_rx_buff[ENET1_RX_BUFF_COUNT][ENET1_RX_BUFF_SIZE]; /* Ethernet1 Receive Buffer */

ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet1_tx_buff[ENET1_TX_BUFF_COUNT][ENET1_TX_BUFF_SIZE]; /* Ethernet1 Transmit Buffer */

/* RT-Thread device object and driver state for ENET1 */
struct eth_device eth1_dev;
static enet_device enet1_dev;
static enet_buff_config_t enet1_rx_buff_cfg = {.buffer = (uint32_t)enet1_rx_buff,
                                               .count = ENET1_RX_BUFF_COUNT,
                                               .size = ENET1_RX_BUFF_SIZE
                                              };

static enet_buff_config_t enet1_tx_buff_cfg = {.buffer = (uint32_t)enet1_tx_buff,
                                               .count = ENET1_TX_BUFF_COUNT,
                                               .size = ENET1_TX_BUFF_SIZE
                                              };

#if __USE_ENET_PTP
static enet_ptp_ts_update_t ptp_timestamp1 = {0, 0};
static enet_ptp_config_t ptp_config1 = {.timestamp_rollover_mode = enet_ts_dig_rollover_control,
                                        .update_method = enet_ptp_time_fine_update,
                                        .addend = 0xffffffff,
                                       };
#endif

/* Aggregate descriptor for ENET1 (see enet0 above for field semantics) */
static hpm_enet_t enet1 = {.name = "E1",
                           .base = HPM_ENET1,
                           .irq_num = IRQn_ENET1,
                           .inf = BOARD_ENET1_INF,
                           .eth_dev = &eth1_dev,
                           .enet_dev = &enet1_dev,
                           .rx_buff_cfg = &enet1_rx_buff_cfg,
                           .tx_buff_cfg = &enet1_tx_buff_cfg,
                           .dma_rx_desc_tab = enet1_dma_rx_desc_tab,
                           .dma_tx_desc_tab = enet1_dma_tx_desc_tab,
#if !BOARD_ENET1_INF
                           .int_refclk = BOARD_ENET1_INT_REF_CLK,
#else
                           .tx_delay = BOARD_ENET1_TX_DLY,
                           .rx_delay = BOARD_ENET1_RX_DLY,
#endif

#if __USE_ENET_PTP
                           .ptp_clk_src = BOARD_ENET1_PTP_CLOCK,
                           .ptp_config = &ptp_config1,
                           .ptp_timestamp = &ptp_timestamp1
#endif
                          };
#endif
141
/* Table of all enabled Ethernet instances; rt_hw_eth_init() iterates it. */
static hpm_enet_t *s_geths[] = {
#ifdef BSP_USING_ETH0
    &enet0,
#endif

#ifdef BSP_USING_ETH1
    &enet1
#endif
};
151
enet_get_mac_address(ENET_Type * ptr,uint8_t * mac)152 ATTR_WEAK uint8_t enet_get_mac_address(ENET_Type *ptr, uint8_t *mac)
153 {
154 uint32_t macl, mach;
155 uint8_t i;
156
157 i = (ptr == HPM_ENET0) ? 0 : 1;
158
159 if (mac == NULL) {
160 return ENET_MAC_ADDR_PARA_ERROR;
161 }
162
163 /* load mac address from OTP MAC area */
164 if (i == 0) {
165 macl = otp_read_from_shadow(OTP_SOC_MAC0_IDX);
166 mach = otp_read_from_shadow(OTP_SOC_MAC0_IDX + 1);
167
168 mac[0] = (macl >> 0) & 0xff;
169 mac[1] = (macl >> 8) & 0xff;
170 mac[2] = (macl >> 16) & 0xff;
171 mac[3] = (macl >> 24) & 0xff;
172 mac[4] = (mach >> 0) & 0xff;
173 mac[5] = (mach >> 8) & 0xff;
174 } else {
175 macl = otp_read_from_shadow(OTP_SOC_MAC0_IDX + 1);
176 mach = otp_read_from_shadow(OTP_SOC_MAC0_IDX + 2);
177
178 mac[0] = (macl >> 16) & 0xff;
179 mac[1] = (macl >> 24) & 0xff;
180 mac[2] = (mach >> 0) & 0xff;
181 mac[3] = (mach >> 8) & 0xff;
182 mac[4] = (mach >> 16) & 0xff;
183 mac[5] = (mach >> 24) & 0xff;
184 }
185
186 if (!IS_MAC_INVALID(mac)) {
187 return ENET_MAC_ADDR_FROM_OTP_MAC;
188 }
189
190 /* load MAC address from MACRO definitions */
191 memcpy(mac, &mac_init[i], ENET_MAC);
192 return ENET_MAC_ADDR_FROM_MACRO;
193 }
194
/*
 * Bring up one ENET controller from an already-populated enet_device:
 * clocking (RMII refclk or RGMII delays), default interrupt config,
 * MAC + DMA controller init, optional PTP setup, then IRQ enable.
 * Always returns RT_EOK (no failing path is checked here).
 */
static rt_err_t hpm_enet_init(enet_device *init)
{
    if (init->media_interface == enet_inf_rmii)
    {
        /* Initialize reference clock */
        board_init_enet_rmii_reference_clock(init->instance, init->int_refclk);
    }

#if ENET_SOC_RGMII_EN
    /* Set RGMII clock delay */
    if (init->media_interface == enet_inf_rgmii)
    {
        enet_rgmii_enable_clock(init->instance);
        enet_rgmii_set_clock_delay(init->instance, init->tx_delay, init->rx_delay);
    }
#endif
    /* Get the default interrupt config */
    enet_get_default_interrupt_config(init->instance, &init->int_config);

    /* Initialize eth controller */
    enet_controller_init(init->instance, init->media_interface, &init->desc, &init->mac_config, &init->int_config);

    /* Disable LPI interrupt */
    enet_disable_lpi_interrupt(init->instance);

#if __USE_ENET_PTP
    /* initialize PTP Clock */
    board_init_enet_ptp_clock(init->instance);

    /* initialize Ethernet PTP Module: sub-second increment derived from the
     * PTP reference clock frequency */
    init->ptp_config.ssinc = ENET_ONE_SEC_IN_NANOSEC / clock_get_frequency(init->ptp_clk_src);
    enet_init_ptp(init->instance, &init->ptp_config);

    /* set the initial timestamp */
    enet_set_ptp_timestamp(init->instance, &init->ptp_timestamp);
#endif

    /* enable irq */
    intc_m_enable_irq(init->irq_number);

    return RT_EOK;
}
237
/*
 * rt_device init hook: configure pins, reset the PHY, resolve and program
 * the MAC address filter, then initialize MAC/DMA via hpm_enet_init().
 * dev->user_data is the enet_device set up by rt_hw_eth_init().
 */
static rt_err_t rt_hpm_eth_init(rt_device_t dev)
{
    uint8_t mac[ENET_MAC];

    enet_device *enet_dev = (enet_device *)dev->user_data;

    /* Initialize GPIOs */
    board_init_enet_pins(enet_dev->instance);

    /* Reset an enet PHY */
    board_reset_enet_phy(enet_dev->instance);

    /* Get MAC address */
    enet_get_mac_address(enet_dev->instance, mac);

    /* Set mac0 address - bytes split little-endian across the high/low
     * address-filter register pair */
    enet_dev->mac_config.mac_addr_high[0] = mac[5] << 8 | mac[4];
    enet_dev->mac_config.mac_addr_low[0] = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0];
    enet_dev->mac_config.valid_max_count = 1;

    /* Initialize MAC and DMA (RT_EOK == 0) */
    if (hpm_enet_init(enet_dev) == 0)
    {
        LOG_D("Ethernet control initialize successfully\n");
        return RT_EOK;
    }
    else
    {
        LOG_D("Ethernet control initialize unsuccessfully\n");
        return -RT_ERROR;
    }
}
270
/* rt_device open hook - nothing to do; the controller is fully set up in init. */
static rt_err_t rt_hpm_eth_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}
275
/* rt_device close hook - intentionally a no-op. */
static rt_err_t rt_hpm_eth_close(rt_device_t dev)
{
    return RT_EOK;
}
280
/* rt_device read hook - unused; all Rx traffic goes through eth_rx (lwIP path). */
static rt_ssize_t rt_hpm_eth_read(rt_device_t dev, rt_off_t pos, void * buffer, rt_size_t size)
{
    return 0;
}
285
/* rt_device write hook - unused; all Tx traffic goes through eth_tx (lwIP path). */
static rt_ssize_t rt_hpm_eth_write(rt_device_t dev, rt_off_t pos, const void * buffer, rt_size_t size)
{
    return 0;
}
290
rt_hpm_eth_control(rt_device_t dev,int cmd,void * args)291 static rt_err_t rt_hpm_eth_control(rt_device_t dev, int cmd, void * args)
292 {
293 uint8_t *mac = (uint8_t *)args;
294 enet_device *enet_dev = (enet_device *)dev->user_data;
295
296 switch (cmd)
297 {
298 case NIOCTL_GADDR:
299 if (args != NULL)
300 {
301 enet_get_mac_address(enet_dev->instance, (uint8_t *)mac);
302 SMEMCPY(args, mac, ENET_MAC);
303 }
304 else
305 {
306 return -RT_ERROR;
307 }
308 break;
309 default:
310 break;
311 }
312
313 return RT_EOK;
314 }
315
/*
 * lwIP transmit hook: copy the pbuf chain `p` into the ENET Tx DMA buffers
 * and hand the filled descriptors to the DMA engine.
 *
 * Frames larger than one Tx buffer are spread across consecutive
 * descriptors (the ring is linked through tdes3_bm.next_desc). Waiting for
 * a descriptor still owned by the DMA is bounded by a timeout of
 * RT_TICK_PER_SECOND / 100 ticks (~10 ms).
 *
 * Returns lwIP err_t codes (ERR_OK / ERR_TIMEOUT / ERR_BUF) through the
 * rt_err_t return type.
 */
static rt_err_t rt_hpm_eth_tx(rt_device_t dev, struct pbuf * p)
{
    rt_err_t ret = RT_ERROR; /* NOTE(review): unused; err_t codes are returned directly below */
    uint32_t status;
    enet_device *enet_dev = (enet_device *)dev->user_data;
    uint32_t tx_buff_size = enet_dev->desc.tx_buff_cfg.size;
    struct pbuf *q;
    uint8_t *buffer;
    __IO enet_tx_desc_t *dma_tx_desc;
    uint32_t frame_length = 0;
    uint32_t buffer_offset = 0;
    uint32_t bytes_left_to_copy = 0;
    uint32_t payload_offset = 0;
    enet_tx_desc_t *tx_desc_list_cur = enet_dev->desc.tx_desc_list_cur;

    /* Start filling at the driver's current Tx descriptor */
    dma_tx_desc = tx_desc_list_cur;
    buffer = (uint8_t *)(dma_tx_desc->tdes2_bm.buffer1);
    buffer_offset = 0;
    rt_tick_t t_start;

    /* copy frame from pbufs to driver buffers */
    for (q = p; q != NULL; q = q->next)
    {
        /* Get bytes in current lwIP buffer */
        bytes_left_to_copy = q->len;
        payload_offset = 0;

        /* Check if the length of data to copy is bigger than Tx buffer size*/
        while ((bytes_left_to_copy + buffer_offset) > tx_buff_size)
        {
            /* check DMA own status within timeout */
            t_start = rt_tick_get();
            while (dma_tx_desc->tdes0_bm.own)
            {
                if (rt_tick_get() - t_start > RT_TICK_PER_SECOND / 100)
                {
                    return ERR_TIMEOUT;
                }
            }

            /* Copy data to Tx buffer
             * NOTE(review): unlike the tail copy below, this path uses the
             * descriptor's buffer1 address without passing it through
             * sys_address_to_core_local_mem() - confirm both address views
             * are equivalent here. */
            SMEMCPY((uint8_t *)((uint8_t *)buffer + buffer_offset),
                    (uint8_t *)((uint8_t *)q->payload + payload_offset),
                    tx_buff_size - buffer_offset);

            /* Point to next descriptor */
            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);

            /* Check if the buffer is available */
            if (dma_tx_desc->tdes0_bm.own != 0)
            {
                LOG_E("DMA tx desc buffer is not valid\n");
                return ERR_BUF;
            }

            buffer = (uint8_t *)(dma_tx_desc->tdes2_bm.buffer1);

            /* Account for the chunk just written and restart at the new buffer */
            bytes_left_to_copy = bytes_left_to_copy - (tx_buff_size - buffer_offset);
            payload_offset = payload_offset + (tx_buff_size - buffer_offset);
            frame_length = frame_length + (tx_buff_size - buffer_offset);
            buffer_offset = 0;
        }

        /* check DMA own status within timeout */
        t_start = rt_tick_get();
        while (dma_tx_desc->tdes0_bm.own)
        {
            if (rt_tick_get() - t_start > RT_TICK_PER_SECOND / 100)
            {
                return ERR_TIMEOUT;
            }
        }

        /* Copy the remaining bytes
         * NOTE(review): core id 0 is hard-coded here while other call sites
         * in this file use BOARD_RUNNING_CORE - confirm this is intended. */
        buffer = (void *)sys_address_to_core_local_mem(0, (uint32_t)buffer);
        SMEMCPY((uint8_t *)((uint8_t *)buffer + buffer_offset),
                (uint8_t *)((uint8_t *)q->payload + payload_offset),
                bytes_left_to_copy);

        buffer_offset = buffer_offset + bytes_left_to_copy;
        frame_length = frame_length + bytes_left_to_copy;
    }

    /* Prepare transmit descriptors to give to DMA */
    LOG_D("The length of the transmitted frame: %d\n", frame_length);

    /* presumably accounts for the 4-byte FCS appended to the frame - confirm */
    frame_length += 4;
    status = enet_prepare_transmission_descriptors(enet_dev->instance, &enet_dev->desc.tx_desc_list_cur, frame_length, enet_dev->desc.tx_buff_cfg.size);
    if (status != ENET_SUCCESS)
    {
        LOG_E("Ethernet controller transmit unsuccessfully: %d\n", status);
    }

    return ERR_OK;
}
411
/*
 * lwIP receive hook: fetch one completed frame from the Rx DMA ring,
 * move it into a freshly allocated pbuf chain, hand the descriptors back
 * to the DMA, and resume Rx if it stalled on buffer unavailability.
 * Returns NULL when no frame is pending (or pbuf allocation failed).
 */
static struct pbuf *rt_hpm_eth_rx(rt_device_t dev)
{
    struct pbuf *p = NULL, *q = NULL;
    enet_device *enet_dev = (enet_device *)dev->user_data;
    uint32_t rx_buff_size = enet_dev->desc.rx_buff_cfg.size;
    uint16_t len = 0;
    uint8_t *buffer;
    enet_frame_t frame = {0, 0, 0};
    enet_rx_desc_t *dma_rx_desc;
    uint32_t buffer_offset = 0;
    uint32_t payload_offset = 0;
    uint32_t bytes_left_to_copy = 0;
    uint32_t i = 0;

    /* Get a received frame */
    frame = enet_get_received_frame_interrupt(&enet_dev->desc.rx_desc_list_cur,
                                              &enet_dev->desc.rx_frame_info,
                                              enet_dev->desc.rx_buff_cfg.count);

    /* Obtain the size of the packet and put it into the "len" variable. */
    len = frame.length;
    buffer = (uint8_t *)frame.buffer;

    LOG_D("The current received frame length : %d\n", len);

    if (len > 0)
    {
        /* allocate a pbuf chain of pbufs from the Lwip buffer pool */
        p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);

        if (p != NULL)
        {
            dma_rx_desc = frame.rx_desc;
            buffer_offset = 0;
            for (q = p; q != NULL; q = q->next)
            {
                bytes_left_to_copy = q->len;
                payload_offset = 0;

                /* Check if the length of bytes to copy in current pbuf is bigger than Rx buffer size*/
                while ((bytes_left_to_copy + buffer_offset) > rx_buff_size)
                {
                    /* Copy data to pbuf */
                    SMEMCPY((uint8_t *)((uint8_t *)q->payload + payload_offset), (uint8_t *)((uint8_t *)buffer + buffer_offset), (rx_buff_size - buffer_offset));

                    /* Point to next descriptor */
                    dma_rx_desc = (enet_rx_desc_t *)(dma_rx_desc->rdes3_bm.next_desc);
                    buffer = (uint8_t *)(dma_rx_desc->rdes2_bm.buffer1);

                    bytes_left_to_copy = bytes_left_to_copy - (rx_buff_size - buffer_offset);
                    payload_offset = payload_offset + (rx_buff_size - buffer_offset);
                    buffer_offset = 0;
                }
                /* Copy remaining data in pbuf
                 * NOTE(review): this repoints q->payload at the DMA buffer
                 * (zero-copy) rather than copying the remaining bytes, even
                 * though the descriptor is released back to the DMA below -
                 * confirm the stack consumes the pbuf before the buffer is
                 * reused. */
                q->payload = (void *)sys_address_to_core_local_mem(0, (uint32_t)buffer);
                buffer_offset = buffer_offset + bytes_left_to_copy;
            }
        }

        /* Release descriptors to DMA */
        /* Point to first descriptor */
        dma_rx_desc = frame.rx_desc;

        /* Set Own bit in Rx descriptors: gives the buffers back to DMA */
        for (i = 0; i < enet_dev->desc.rx_frame_info.seg_count; i++)
        {
            dma_rx_desc->rdes0_bm.own = 1;
            dma_rx_desc = (enet_rx_desc_t*)(dma_rx_desc->rdes3_bm.next_desc);
        }

        /* Clear Segment_Count */
        enet_dev->desc.rx_frame_info.seg_count = 0;
    }

    /* Resume Rx Process: if the DMA stopped on "receive buffer unavailable",
     * acknowledge the status and poke the Rx poll-demand register */
    if (ENET_DMA_STATUS_RU_GET(enet_dev->instance->DMA_STATUS))
    {
        enet_dev->instance->DMA_STATUS = ENET_DMA_STATUS_RU_MASK;
        enet_dev->instance->DMA_RX_POLL_DEMAND = 1;
    }

    return p;
}
495
eth_rx_callback(struct eth_device * dev)496 static void eth_rx_callback(struct eth_device* dev)
497 {
498 rt_err_t result;
499 result = eth_device_ready(dev);
500 if (result != RT_EOK)
501 {
502 LOG_I("Receive callback error = %d\n", result);
503 }
504 }
505
/*
 * Shared ENET interrupt body: snapshot DMA_STATUS, clear the LPI status
 * if flagged, and on a receive interrupt acknowledge it and notify the
 * RT-Thread ethernet stack.
 */
void isr_enet(hpm_enet_t *obj)
{
    uint32_t status;

    status = obj->base->DMA_STATUS;

    if (ENET_DMA_STATUS_GLPII_GET(status)) {
        /* presumably the LPI interrupt status is cleared by reading
         * LPI_CSR (read-to-clear) - the value itself is discarded */
        obj->base->LPI_CSR;
    }

    if (ENET_DMA_STATUS_RI_GET(status)) {
        /* Acknowledge the Rx interrupt.
         * NOTE(review): if DMA_STATUS bits are write-1-to-clear, this
         * read-modify-write may also write back (and clear) other status
         * bits that became pending since `status` was read - confirm. */
        obj->base->DMA_STATUS |= ENET_DMA_STATUS_RI_SET(ENET_DMA_STATUS_RI_GET(status));
        eth_rx_callback(obj->eth_dev);
    }
}
521
#ifdef BSP_USING_ETH0
/* ENET0 vector entry: delegate to the shared handler with this instance */
void isr_enet0(void)
{
    isr_enet(&enet0);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET0, isr_enet0)
#endif
529
#ifdef BSP_USING_ETH1
/* ENET1 vector entry: delegate to the shared handler with this instance */
void isr_enet1(void)
{
    isr_enet(&enet1);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET1, isr_enet1)
#endif
537
538 int rt_hw_eth_init(void)
539 {
540 rt_err_t err = RT_ERROR;
541
542 for (uint32_t i = 0; i < ARRAY_SIZE(s_geths); i++)
543 {
544 /* Clear memory */
545 memset((uint8_t *)s_geths[i]->dma_rx_desc_tab, 0x00, sizeof(enet_rx_desc_t) * s_geths[i]->rx_buff_cfg->count);
546 memset((uint8_t *)s_geths[i]->dma_tx_desc_tab, 0x00, sizeof(enet_tx_desc_t) * s_geths[i]->tx_buff_cfg->count);
547
548 memset((uint8_t *)s_geths[i]->rx_buff_cfg->buffer, 0x00, sizeof(s_geths[i]->rx_buff_cfg->size));
549 memset((uint8_t *)s_geths[i]->tx_buff_cfg->buffer, 0x00, sizeof(s_geths[i]->tx_buff_cfg->size));
550
551 /* Set list heads */
552 s_geths[i]->enet_dev->desc.tx_desc_list_head = (enet_tx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)s_geths[i]->dma_tx_desc_tab);
553 s_geths[i]->enet_dev->desc.rx_desc_list_head = (enet_rx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)s_geths[i]->dma_rx_desc_tab);
554
555 s_geths[i]->enet_dev->desc.tx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, s_geths[i]->tx_buff_cfg->buffer);
556 s_geths[i]->enet_dev->desc.tx_buff_cfg.count = s_geths[i]->tx_buff_cfg->count;
557 s_geths[i]->enet_dev->desc.tx_buff_cfg.size = s_geths[i]->tx_buff_cfg->size;
558
559 s_geths[i]->enet_dev->desc.rx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, s_geths[i]->rx_buff_cfg->buffer);
560 s_geths[i]->enet_dev->desc.rx_buff_cfg.count = s_geths[i]->rx_buff_cfg->count;
561 s_geths[i]->enet_dev->desc.rx_buff_cfg.size = s_geths[i]->rx_buff_cfg->size;
562
563 /* Set DMA PBL */
564 s_geths[i]->enet_dev->mac_config.dma_pbl = board_get_enet_dma_pbl(s_geths[i]->base);
565
566 /* Set instance */
567 s_geths[i]->enet_dev->instance = s_geths[i]->base;
568
569 /* Set media interface */
570 s_geths[i]->enet_dev->media_interface = s_geths[i]->inf ? enet_inf_rgmii : enet_inf_rmii;
571
572 if (s_geths[i]->enet_dev->media_interface == enet_inf_rmii)
573 {
574 /* Set refclk */
575 s_geths[i]->enet_dev->int_refclk = s_geths[i]->int_refclk;
576 } else {
577 /* Set TX/RX delay */
578 s_geths[i]->enet_dev->tx_delay = s_geths[i]->tx_delay;
579 s_geths[i]->enet_dev->rx_delay = s_geths[i]->rx_delay;
580 }
581
582
583 #if __USE_ENET_PTP
584 /* Set PTP function */
585 s_geths[i]->enet_dev->ptp_clk_src = s_geths[i]->ptp_clk_src;
586 s_geths[i]->enet_dev->ptp_config = *s_geths[i]->ptp_config;
587 s_geths[i]->enet_dev->ptp_timestamp = *s_geths[i]->ptp_timestamp;
588 #endif
589
590 /* Set the irq number */
591 s_geths[i]->enet_dev->irq_number = s_geths[i]->irq_num;
592
593 /* Set the parent parameters */
594 s_geths[i]->eth_dev->parent.init = rt_hpm_eth_init;
595 s_geths[i]->eth_dev->parent.open = rt_hpm_eth_open;
596 s_geths[i]->eth_dev->parent.close = rt_hpm_eth_close;
597 s_geths[i]->eth_dev->parent.read = rt_hpm_eth_read;
598 s_geths[i]->eth_dev->parent.write = rt_hpm_eth_write;
599 s_geths[i]->eth_dev->parent.control = rt_hpm_eth_control;
600
601 s_geths[i]->eth_dev->parent.user_data = s_geths[i]->enet_dev;
602
603 s_geths[i]->eth_dev->eth_rx = rt_hpm_eth_rx;
604 s_geths[i]->eth_dev->eth_tx = rt_hpm_eth_tx;
605
606 err = eth_device_init(s_geths[i]->eth_dev, s_geths[i]->name);
607
608 if (RT_EOK == err)
609 {
610 LOG_D("Ethernet device %d initialize successfully!\n", i);
611 }
612 else
613 {
614 LOG_D("Ethernet device %d initialize unsuccessfully!\n");
615 return err;
616 }
617 }
618
619 return err;
620
621 }
622 INIT_DEVICE_EXPORT(rt_hw_eth_init);
623 #endif /* BSP_USING_ETH */
624