/*
 * Copyright (c) 2021-2024 HPMicro
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

/*---------------------------------------------------------------------
 * Includes
 *---------------------------------------------------------------------
 */
#include "hpm_enet_drv.h"
#include "hpm_enet_soc_drv.h"

/*---------------------------------------------------------------------
 * Internal API
 *---------------------------------------------------------------------
 */
static void enet_mode_init(ENET_Type *ptr, uint32_t intr)
{
    /* receive and transmit store and forward */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_RSF_MASK | ENET_DMA_OP_MODE_TSF_MASK;

    /* enable hardware flow control */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_EFC_MASK;

    /* enable error frame forwarding */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_FEF_MASK;

    /* disable osf mode */
    ptr->DMA_OP_MODE &= ~ENET_DMA_OP_MODE_OSF_MASK;

    ptr->DMA_INTR_EN |= intr;

    /* wait until the AXI read and write channels are idle */
    while (ENET_DMA_BUS_STATUS_AXIRDSTS_GET(ptr->DMA_BUS_STATUS) || ENET_DMA_BUS_STATUS_AXWHSTS_GET(ptr->DMA_BUS_STATUS)) {
    }

    /* start the receive and transmit dma */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_ST_MASK | ENET_DMA_OP_MODE_SR_MASK;
}

static hpm_stat_t enet_dma_init(ENET_Type *ptr, enet_desc_t *desc, uint32_t intr, uint8_t pbl)
{
    /* generate a software reset */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_SWR_MASK;

    /* wait for the reset process to complete */
    while (ENET_DMA_BUS_MODE_SWR_GET(ptr->DMA_BUS_MODE)) {
    }

    /* initialize the bus mode register */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_AAL_MASK;

    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_FB_MASK;

    /* enable pblx8 mode */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_PBLX8_MASK;

    /* set the programmable burst length */
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_PBL_MASK;
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_PBL_SET(pbl);

    /* disable separate pbl */
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_USP_MASK;

    /* descriptor length */
#if ENET_SOC_ALT_EHD_DES_LEN == ENET_SOC_ALT_EHD_DES_MIN_LEN
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_ATDS_MASK;
#elif ENET_SOC_ALT_EHD_DES_LEN == ENET_SOC_ALT_EHD_DES_MAX_LEN
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_ATDS_MASK;
#endif

    /* set the maximum enabled burst length */
    if (ENET_DMA_BUS_MODE_FB_GET(ptr->DMA_BUS_MODE) == 0) {
        ptr->DMA_AXI_MODE |= ENET_DMA_AXI_MODE_BLEN4_MASK | ENET_DMA_AXI_MODE_BLEN8_MASK | ENET_DMA_AXI_MODE_BLEN16_MASK;
    } else {
        /* TODO: set BLENX_MASK */
    }

    /* initialize the Tx descriptor list: chain mode */
    enet_dma_tx_desc_chain_init(ptr, desc);

    /* initialize the Rx descriptor list: chain mode */
    enet_dma_rx_desc_chain_init(ptr, desc);

    enet_mode_init(ptr, intr);

    enet_dma_flush(ptr);

    return status_success;
}

static hpm_stat_t enet_mac_init(ENET_Type *ptr, enet_mac_config_t *config, enet_inf_type_t inf_type)
{
    for (int i = 0; i < config->valid_max_count; i++) {
        if (i == 0) {
            ptr->MAC_ADDR_0_HIGH &= ~ENET_MAC_ADDR_0_HIGH_ADDRHI_MASK;
            ptr->MAC_ADDR_0_LOW &= ~ENET_MAC_ADDR_0_LOW_ADDRLO_MASK;
            ptr->MAC_ADDR_0_HIGH |= ENET_MAC_ADDR_0_HIGH_ADDRHI_SET(config->mac_addr_high[i]);
            ptr->MAC_ADDR_0_LOW |= ENET_MAC_ADDR_0_LOW_ADDRLO_SET(config->mac_addr_low[i]);
        } else {
            ptr->MAC_ADDR[i - 1].HIGH &= ~ENET_MAC_ADDR_HIGH_ADDRHI_MASK;
            ptr->MAC_ADDR[i - 1].LOW &= ~ENET_MAC_ADDR_LOW_ADDRLO_MASK;
            ptr->MAC_ADDR[i - 1].HIGH |= ENET_MAC_ADDR_HIGH_AE_MASK | ENET_MAC_ADDR_HIGH_ADDRHI_SET(config->mac_addr_high[i]);
            ptr->MAC_ADDR[i - 1].LOW |= ENET_MAC_ADDR_LOW_ADDRLO_SET(config->mac_addr_low[i]);
        }
    }

    /* set the appropriate filters for the incoming frames */
    ptr->MACFF |= ENET_MACFF_RA_SET(1); /* receive all */

    /* replace the content of mac address 0 in the sa field of all transmitted frames */
    ptr->MACCFG &= ~ENET_MACCFG_SARC_MASK;
    ptr->MACCFG |= ENET_MACCFG_SARC_SET(config->sarc);

    /* set the port speed bits; rgmii clears PS to select 1000 Mbps operation */
    ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;

    if (inf_type == enet_inf_rgmii) {
        ptr->MACCFG &= ~ENET_MACCFG_PS_MASK;
    } else if (inf_type == enet_inf_rmii) {
        ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;
    }
#if defined(HPM_IP_FEATURE_ENET_HAS_MII_MODE) && HPM_IP_FEATURE_ENET_HAS_MII_MODE
    else if (inf_type == enet_inf_mii) {
        ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;
    }
#endif
    else {
        return status_invalid_argument;
    }

    /* default to full duplex and set the inter-frame gap accordingly */
    ptr->MACCFG |= ENET_MACCFG_DM_MASK;

    if (ENET_MACCFG_DM_GET(ptr->MACCFG) == 0) {
        ptr->MACCFG |= ENET_MACCFG_IFG_SET(4);
    } else {
        ptr->MACCFG |= ENET_MACCFG_IFG_SET(2);
    }

    /* enable the transmitter and the receiver */
    ptr->MACCFG |= ENET_MACCFG_TE_MASK | ENET_MACCFG_RE_MASK;

    return status_success;
}

static void enet_mask_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    /* mask the specified interrupts */
    ptr->INTR_MASK |= mask;
}

/*---------------------------------------------------------------------
 * Driver API
 *---------------------------------------------------------------------
 */
uint32_t enet_get_interrupt_status(ENET_Type *ptr)
{
    return ptr->INTR_STATUS;
}

void enet_mask_mmc_rx_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    ptr->MMC_INTR_MASK_RX |= mask;
}

uint32_t enet_get_mmc_rx_interrupt_status(ENET_Type *ptr)
{
    return ptr->MMC_INTR_RX;
}

void enet_mask_mmc_tx_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    ptr->MMC_INTR_MASK_TX |= mask;
}

uint32_t enet_get_mmc_tx_interrupt_status(ENET_Type *ptr)
{
    return ptr->MMC_INTR_TX;
}

void enet_dma_flush(ENET_Type *ptr)
{
    /* flush the DMA transmit FIFO */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_FTF_MASK;
    while (ENET_DMA_OP_MODE_FTF_GET(ptr->DMA_OP_MODE)) {
    }
}

void enet_write_phy(ENET_Type *ptr, uint32_t phy_addr, uint32_t addr, uint32_t data)
{
    /* set the data to be written */
    ptr->GMII_DATA = ENET_GMII_DATA_GD_SET(data);

    /* set the phy address, register address, write operation and busy flag */
    ptr->GMII_ADDR = ENET_GMII_ADDR_PA_SET(phy_addr)
                   | ENET_GMII_ADDR_GR_SET(addr)
                   | ENET_GMII_ADDR_CR_SET(enet_csr_150m_to_250m_mdc_csr_div_102)
                   | ENET_GMII_ADDR_GW_SET(enet_phy_op_write)
                   | ENET_GMII_ADDR_GB_SET(enet_gmii_busy);

    /* wait until the write operation completes */
    while (ENET_GMII_ADDR_GB_GET(ptr->GMII_ADDR)) {
    }
}

uint16_t enet_read_phy(ENET_Type *ptr, uint32_t phy_addr, uint32_t addr)
{
    /* set the phy address, register address, read operation and busy flag */
    ptr->GMII_ADDR = ENET_GMII_ADDR_PA_SET(phy_addr)
                   | ENET_GMII_ADDR_GR_SET(addr)
                   | ENET_GMII_ADDR_CR_SET(enet_csr_150m_to_250m_mdc_csr_div_102)
                   | ENET_GMII_ADDR_GW_SET(enet_phy_op_read)
                   | ENET_GMII_ADDR_GB_SET(enet_gmii_busy);

    /* wait until the read operation completes */
    while (ENET_GMII_ADDR_GB_GET(ptr->GMII_ADDR)) {
    }

    /* read and return the data */
    return (uint16_t)ENET_GMII_DATA_GD_GET(ptr->GMII_DATA);
}
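
/*
 * Usage sketch for the MDIO accessors above (illustrative, not part of the
 * driver): read the PHY identifier and restart auto-negotiation. The base
 * pointer HPM_ENET0 and the PHY address 0 are board-specific assumptions.
 *
 *     uint16_t id1 = enet_read_phy(HPM_ENET0, 0, 2);   // PHYID1 register
 *     uint16_t bmcr = enet_read_phy(HPM_ENET0, 0, 0);  // BMCR register
 *     bmcr |= (1 << 12) | (1 << 9);                    // AN enable + restart AN
 *     enet_write_phy(HPM_ENET0, 0, 0, bmcr);
 */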

void enet_set_line_speed(ENET_Type *ptr, enet_line_speed_t speed)
{
    ptr->MACCFG &= ~(ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK);
    ptr->MACCFG |= speed << ENET_MACCFG_FES_SHIFT;
}

void enet_set_duplex_mode(ENET_Type *ptr, enet_duplex_mode_t mode)
{
    ptr->MACCFG &= ~ENET_MACCFG_DM_MASK;
    ptr->MACCFG |= ENET_MACCFG_DM_SET(mode);
}

hpm_stat_t enet_controller_init(ENET_Type *ptr, enet_inf_type_t inf_type, enet_desc_t *desc, enet_mac_config_t *config, enet_int_config_t *int_config)
{
    /* select an interface */
    enet_intf_selection(ptr, inf_type);

    /* initialize the DMA */
    enet_dma_init(ptr, desc, int_config->int_enable, config->dma_pbl);

    /* initialize the MAC */
    enet_mac_init(ptr, config, inf_type);

    /* mask the specified interrupts */
    enet_mask_interrupt_event(ptr, int_config->int_mask);

    /* mask the mmc rx interrupts */
    enet_mask_mmc_rx_interrupt_event(ptr, int_config->mmc_intr_mask_rx);

    /* mask the mmc tx interrupts */
    enet_mask_mmc_tx_interrupt_event(ptr, int_config->mmc_intr_mask_tx);

    return status_success;
}
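
/*
 * A minimal bring-up sketch for enet_controller_init() (illustrative; the
 * RGMII interface choice, the MAC address and the burst-length/sarc enum
 * values are assumptions to adapt per board; descriptor/buffer wiring is
 * shown in the sketch after the chain-init helpers below):
 *
 *     enet_desc_t desc = {0};
 *     enet_mac_config_t mac_cfg = {0};
 *     enet_int_config_t int_cfg = {0};
 *     uint8_t mac[6] = {0x98, 0x2c, 0xbc, 0xb1, 0x9f, 0x17};  // assumed address
 *
 *     mac_cfg.mac_addr_low[0]  = mac[0] | mac[1] << 8 | mac[2] << 16 | mac[3] << 24;
 *     mac_cfg.mac_addr_high[0] = mac[4] | mac[5] << 8;
 *     mac_cfg.valid_max_count  = 1;
 *     mac_cfg.dma_pbl = enet_pbl_32;          // assumed enum value
 *     mac_cfg.sarc = enet_sarc_replace_mac0;  // assumed enum value
 *
 *     enet_get_default_interrupt_config(HPM_ENET0, &int_cfg);
 *     enet_controller_init(HPM_ENET0, enet_inf_rgmii, &desc, &mac_cfg, &int_cfg);
 */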

/*****************************************************************************
 * DMA API
 ****************************************************************************/
void enet_rx_resume(ENET_Type *ptr)
{
    /* if reception was suspended for lack of descriptors, clear the status and poll the receive DMA */
    if (ENET_DMA_STATUS_RU_GET(ptr->DMA_STATUS)) {
        ptr->DMA_STATUS = ENET_DMA_STATUS_RU_MASK;
        ptr->DMA_RX_POLL_DEMAND = 1;
    }
}

uint32_t enet_check_received_frame(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info)
{
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* check for the last segment */
    if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
        (rx_desc_list_cur->rdes0_bm.ls == 1)) {
        rx_frame_info->seg_count++;
        if (rx_frame_info->seg_count == 1) {
            rx_frame_info->fs_rx_desc = rx_desc_list_cur;
        }
        rx_frame_info->ls_rx_desc = rx_desc_list_cur;
        return 1;
    }
    /* check for the first segment */
    else if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
             (rx_desc_list_cur->rdes0_bm.fs == 1) &&
             (rx_desc_list_cur->rdes0_bm.ls == 0)) {
        rx_frame_info->fs_rx_desc = rx_desc_list_cur;
        rx_frame_info->ls_rx_desc = NULL;
        rx_frame_info->seg_count = 1;
        rx_desc_list_cur = (enet_rx_desc_t *)rx_desc_list_cur->rdes3_bm.next_desc;
        *parent_rx_desc_list_cur = rx_desc_list_cur;
    }
    /* check for intermediate segments */
    else if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
             (rx_desc_list_cur->rdes0_bm.fs == 0) &&
             (rx_desc_list_cur->rdes0_bm.ls == 0)) {
        rx_frame_info->seg_count++;
        rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
        *parent_rx_desc_list_cur = rx_desc_list_cur;
    }

    return 0;
}

enet_frame_t enet_get_received_frame(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info)
{
    uint32_t frame_length = 0;
    enet_frame_t frame = {0, 0, 0};
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* get the frame length of the received packet: subtract 4 bytes for the CRC */
    frame_length = rx_desc_list_cur->rdes0_bm.fl - 4;
    frame.length = frame_length;

    /* get the address of the first frame descriptor and the buffer start address */
    frame.rx_desc = rx_frame_info->fs_rx_desc;
    frame.buffer = rx_frame_info->fs_rx_desc->rdes2_bm.buffer1;

    /* update the Ethernet DMA global Rx descriptor with the next Rx descriptor (chained mode):
     * select the next DMA Rx descriptor list for the next buffer to read */
    rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
    *parent_rx_desc_list_cur = rx_desc_list_cur;

    return frame;
}
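
/*
 * Polling receive sketch built on the two helpers above (illustrative):
 * once enet_check_received_frame() reports a complete frame, consume it,
 * then hand every segment descriptor back to the DMA and resume reception.
 * "desc" and process() are application-side assumptions.
 *
 *     enet_rx_frame_info_t info = {0};
 *
 *     if (enet_check_received_frame(&desc.rx_desc_list_cur, &info) == 1) {
 *         enet_frame_t frame = enet_get_received_frame(&desc.rx_desc_list_cur, &info);
 *         process((uint8_t *)frame.buffer, frame.length);
 *
 *         enet_rx_desc_t *d = frame.rx_desc;           // return all segment descriptors
 *         for (uint32_t i = 0; i < info.seg_count; i++) {
 *             d->rdes0_bm.own = 1;
 *             d = (enet_rx_desc_t *)d->rdes3_bm.next_desc;
 *         }
 *         info.seg_count = 0;
 *         enet_rx_resume(HPM_ENET0);                   // in case reception was suspended
 *     }
 */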

enet_frame_t enet_get_received_frame_interrupt(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info, uint32_t rx_desc_count)
{
    enet_frame_t frame = {0, 0, 0};
    uint32_t desc_scan_counter = 0;
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* scan descriptors owned by the CPU */
    while ((rx_desc_list_cur->rdes0_bm.own == 0) &&
           (desc_scan_counter < rx_desc_count)) {

        desc_scan_counter++;

        /* check for the first segment in the frame */
        if ((rx_desc_list_cur->rdes0_bm.fs == 1) &&
            (rx_desc_list_cur->rdes0_bm.ls == 0)) {
            rx_frame_info->fs_rx_desc = rx_desc_list_cur;
            rx_frame_info->seg_count = 1;
            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;
        }
        /* check for an intermediate segment */
        else if ((rx_desc_list_cur->rdes0_bm.ls == 0) &&
                 (rx_desc_list_cur->rdes0_bm.fs == 0)) {
            rx_frame_info->seg_count++;
            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;
        }
        /* otherwise this is the last segment */
        else {
            rx_frame_info->ls_rx_desc = rx_desc_list_cur;

            rx_frame_info->seg_count++;

            /* the first segment is also the last segment */
            if (rx_frame_info->seg_count == 1) {
                rx_frame_info->fs_rx_desc = rx_desc_list_cur;
            }

            /* get the frame length of the received packet: subtract 4 bytes for the CRC */
            frame.length = rx_desc_list_cur->rdes0_bm.fl - 4;

            /* get the buffer start address:
             * check whether the frame spans more than one segment */
            if (rx_frame_info->seg_count > 1) {
                frame.buffer = rx_frame_info->fs_rx_desc->rdes2_bm.buffer1;
            } else {
                frame.buffer = rx_desc_list_cur->rdes2_bm.buffer1;
            }

            frame.rx_desc = rx_frame_info->fs_rx_desc;

            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;

            return frame;
        }
    }

    return frame;
}

void enet_get_default_tx_control_config(ENET_Type *ptr, enet_tx_control_config_t *config)
{
    (void) ptr;
    config->enable_ioc = false;
    config->disable_crc = true;
    config->disable_pad = false;
    config->enable_ttse = false;
    config->enable_crcr = true;
    config->cic = enet_cic_ip_pseudoheader;
    config->vlic = enet_vlic_disable;
    config->saic = enet_saic_disable;
}

void enet_get_default_interrupt_config(ENET_Type *ptr, enet_int_config_t *config)
{
    (void) ptr;

    config->int_enable = enet_normal_int_sum_en /* Enable the normal interrupt summary */
                       | enet_receive_int_en;   /* Enable the receive interrupt */

    config->int_mask = enet_rgsmii_int_mask /* Disable the RGSMII interrupt */
                     | enet_lpi_int_mask;   /* Disable the LPI interrupt */

    config->mmc_intr_mask_rx = 0x03ffffff;  /* Disable all mmc rx interrupt events */
    config->mmc_intr_mask_tx = 0x03ffffff;  /* Disable all mmc tx interrupt events */
}

uint32_t enet_prepare_tx_desc_with_ts_record(ENET_Type *ptr,
                                             enet_tx_desc_t **parent_tx_desc_list_cur,
                                             enet_tx_control_config_t *config,
                                             uint16_t frame_length, uint16_t tx_buff_size,
                                             enet_ptp_ts_system_t *timestamp)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    uint32_t retry_cnt = ENET_RETRY_CNT;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }

    /* the descriptor is owned by the Ethernet DMA when the own bit is set, by the CPU when it is reset */
    dma_tx_desc = tx_desc_list_cur;

    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set the first and the last segment */
        dma_tx_desc->tdes0_bm.own = 0;
        dma_tx_desc->tdes0_bm.fs = 1;
        dma_tx_desc->tdes0_bm.ls = 1;
        dma_tx_desc->tdes0_bm.ic = config->enable_ioc;
        dma_tx_desc->tdes0_bm.dc = config->disable_crc;
        dma_tx_desc->tdes0_bm.dp = config->disable_pad;
        dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
        dma_tx_desc->tdes0_bm.cic = config->cic;
        dma_tx_desc->tdes0_bm.vlic = config->vlic;
        dma_tx_desc->tdes0_bm.ttse = config->enable_ttse;
        dma_tx_desc->tdes1_bm.saic = config->saic;
        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);
        /* set the own bit of the Tx descriptor status: give the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0 |= 1UL << 31;
        ptr->DMA_TX_POLL_DEMAND = 1;

        if (dma_tx_desc->tdes0_bm.ttse == true) {
            /* wait for the transmit timestamp with a bounded retry count */
            do {
            } while (dma_tx_desc->tdes0_bm.own == 1 && retry_cnt-- > 0);

            if (dma_tx_desc->tdes0_bm.own == 1) {
                return ENET_ERROR;
            }

            timestamp->sec = dma_tx_desc->tdes7_bm.ttsh;
            timestamp->nsec = dma_tx_desc->tdes6_bm.ttsl;
        }

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        for (i = 0; i < buf_count; i++) {
            /* get the next available tx descriptor */
            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);

            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
                dma_tx_desc->tdes0_bm.dc = config->disable_crc;
                dma_tx_desc->tdes0_bm.dp = config->disable_pad;
                dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
                dma_tx_desc->tdes0_bm.cic = config->cic;
                dma_tx_desc->tdes0_bm.vlic = config->vlic;
                dma_tx_desc->tdes0_bm.ttse = config->enable_ttse;
                dma_tx_desc->tdes1_bm.saic = config->saic;

                if (dma_tx_desc->tdes0_bm.ttse == true) {
                    /* wait for the transmit timestamp with a bounded retry count */
                    do {
                    } while (dma_tx_desc->tdes0_bm.own == 1 && retry_cnt-- > 0);

                    if (dma_tx_desc->tdes0_bm.own == 1) {
                        return ENET_ERROR;
                    }

                    timestamp->sec = dma_tx_desc->tdes7_bm.ttsh;
                    timestamp->nsec = dma_tx_desc->tdes6_bm.ttsl;
                }
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit */
                dma_tx_desc->tdes0_bm.ls = 1;
                dma_tx_desc->tdes0_bm.ic = config->enable_ioc;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);

                /* set the own bit of the Tx descriptor status: give the buffer back to the Ethernet DMA */
                dma_tx_desc->tdes0 |= 1UL << 31;
                ptr->DMA_TX_POLL_DEMAND = 1;
            }
        }
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}

uint32_t enet_prepare_tx_desc(ENET_Type *ptr, enet_tx_desc_t **parent_tx_desc_list_cur, enet_tx_control_config_t *config, uint16_t frame_length, uint16_t tx_buff_size)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }

    /* the descriptor is owned by the Ethernet DMA when the own bit is set, by the CPU when it is reset */
    dma_tx_desc = tx_desc_list_cur;
    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set the first and the last segment */
        dma_tx_desc->tdes0_bm.own = 0;
        dma_tx_desc->tdes0_bm.fs = 1;
        dma_tx_desc->tdes0_bm.ls = 1;
        dma_tx_desc->tdes0_bm.ic = config->enable_ioc;
        dma_tx_desc->tdes0_bm.dc = config->disable_crc;
        dma_tx_desc->tdes0_bm.dp = config->disable_pad;
        dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
        dma_tx_desc->tdes0_bm.cic = config->cic;
        dma_tx_desc->tdes0_bm.vlic = config->vlic;
        dma_tx_desc->tdes1_bm.saic = config->saic;
        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);
        /* set the own bit of the Tx descriptor status: give the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0 |= 1UL << 31;
        ptr->DMA_TX_POLL_DEMAND = 1;

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        for (i = 0; i < buf_count; i++) {
            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
                dma_tx_desc->tdes0_bm.dc = config->disable_crc;
                dma_tx_desc->tdes0_bm.dp = config->disable_pad;
                dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
                dma_tx_desc->tdes0_bm.cic = config->cic;
                dma_tx_desc->tdes0_bm.vlic = config->vlic;
                dma_tx_desc->tdes1_bm.saic = config->saic;
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit */
                dma_tx_desc->tdes0_bm.ls = 1;
                dma_tx_desc->tdes0_bm.ic = config->enable_ioc;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);

                /* set the own bit of the Tx descriptor status: give the buffer back to the Ethernet DMA */
                dma_tx_desc->tdes0 |= 1UL << 31;
                ptr->DMA_TX_POLL_DEMAND = 1;
            }

            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
        }
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}
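
/*
 * Transmit sketch for enet_prepare_tx_desc() (illustrative): copy the frame
 * into the buffer of the current Tx descriptor, then pass the chain to the
 * DMA. "desc", "frame" and "frame_len" are application-side assumptions; a
 * frame longer than one buffer is split across consecutive descriptors, so
 * all affected buffers must be filled before this call.
 *
 *     enet_tx_control_config_t tx_ctl;
 *     enet_get_default_tx_control_config(HPM_ENET0, &tx_ctl);
 *
 *     uint8_t *buf = (uint8_t *)desc.tx_desc_list_cur->tdes2_bm.buffer1;
 *     memcpy(buf, frame, frame_len);
 *
 *     if (enet_prepare_tx_desc(HPM_ENET0, &desc.tx_desc_list_cur, &tx_ctl,
 *                              frame_len, desc.tx_buff_cfg.size) != ENET_SUCCESS) {
 *         // error: zero buffer size
 *     }
 */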

uint32_t enet_prepare_transmission_descriptors(ENET_Type *ptr, enet_tx_desc_t **parent_tx_desc_list_cur, uint16_t frame_length, uint16_t tx_buff_size)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }

    /* the descriptor is owned by the Ethernet DMA when the own bit is set, by the CPU when it is reset */
    dma_tx_desc = tx_desc_list_cur;
    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set the first and the last segment */
        dma_tx_desc->tdes0_bm.own = 0;
        dma_tx_desc->tdes0_bm.ic = 0;
        dma_tx_desc->tdes0_bm.fs = 1;
        dma_tx_desc->tdes0_bm.ls = 1;
        dma_tx_desc->tdes0_bm.dc = 1;   /* disable CRC appending */
        dma_tx_desc->tdes0_bm.dp = 0;   /* keep padding enabled */
        dma_tx_desc->tdes0_bm.crcr = 1; /* CRC replacement */
        dma_tx_desc->tdes0_bm.cic = 3;  /* checksum insertion incl. pseudo-header */
        dma_tx_desc->tdes1_bm.saic = 2; /* replace the source address */

        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);
        /* set the own bit of the Tx descriptor status: give the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0 |= 1UL << 31;
        ptr->DMA_TX_POLL_DEMAND = 1;

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        for (i = 0; i < buf_count; i++) {
            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit */
                dma_tx_desc->tdes0_bm.ls = 1;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);

                /* set the own bit of the Tx descriptor status: give the buffer back to the Ethernet DMA */
                dma_tx_desc->tdes0 |= 1UL << 31;
                ptr->DMA_TX_POLL_DEMAND = 1;
            }

            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
        }
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}

void enet_dma_tx_desc_chain_init(ENET_Type *ptr, enet_desc_t *desc)
{
    uint32_t i = 0;
    enet_tx_desc_t *dma_tx_desc;

    /* set the tx_desc_list_cur pointer to the first descriptor of the dma_tx_desc_tab list */
    desc->tx_desc_list_cur = desc->tx_desc_list_head;

    /* fill each dma_tx_desc descriptor with the right values */
    for (i = 0; i < desc->tx_buff_cfg.count; i++) {
        /* get the pointer to the ith member of the Tx desc list */
        dma_tx_desc = desc->tx_desc_list_head + i;

        /* set the second address chained bit */
        dma_tx_desc->tdes0_bm.tch = 1;

        /* set the buffer 1 address pointer */
        dma_tx_desc->tdes2_bm.buffer1 = (uint32_t)(&((uint8_t *)desc->tx_buff_cfg.buffer)[i * desc->tx_buff_cfg.size]);

        /* link all Tx descriptors */
        if (i < desc->tx_buff_cfg.count - 1) {
            /* set the next descriptor address register to the next descriptor base address */
            dma_tx_desc->tdes3_bm.next_desc = (uint32_t)(desc->tx_desc_list_head + i + 1);
        } else {
            /* for the last descriptor, set the next descriptor address register to the first descriptor base address */
            dma_tx_desc->tdes3_bm.next_desc = (uint32_t)desc->tx_desc_list_head;
        }
    }

    /* set the transmit descriptor list address register */
    ptr->DMA_TX_DESC_LIST_ADDR = (uint32_t)desc->tx_desc_list_head;
}

void enet_dma_rx_desc_chain_init(ENET_Type *ptr, enet_desc_t *desc)
{
    uint32_t i = 0;
    enet_rx_desc_t *dma_rx_desc;

    /* set the rx_desc_list_cur pointer to the first descriptor of the dma_rx_desc_tab list */
    desc->rx_desc_list_cur = desc->rx_desc_list_head;

    /* fill each dma_rx_desc descriptor with the right values */
    for (i = 0; i < desc->rx_buff_cfg.count; i++) {
        /* get the pointer to the ith member of the Rx desc list */
        dma_rx_desc = desc->rx_desc_list_head + i;

        /* set the own bit of the rx descriptor status */
        dma_rx_desc->rdes0_bm.own = 1;

        /* set the buffer 1 size and the second address chained bit */
        dma_rx_desc->rdes1_bm.rch = 1;
        dma_rx_desc->rdes1_bm.rbs1 = desc->rx_buff_cfg.size;

        /* set the buffer 1 address pointer */
        dma_rx_desc->rdes2_bm.buffer1 = (uint32_t)(&((uint8_t *)desc->rx_buff_cfg.buffer)[i * desc->rx_buff_cfg.size]);

        /* link all Rx descriptors */
        if (i < desc->rx_buff_cfg.count - 1) {
            /* set the next descriptor address register to the next descriptor base address */
            dma_rx_desc->rdes3_bm.next_desc = (uint32_t)(desc->rx_desc_list_head + i + 1);
        } else {
            /* for the last descriptor, set the next descriptor address register to the first descriptor base address */
            dma_rx_desc->rdes3_bm.next_desc = (uint32_t)desc->rx_desc_list_head;
        }
    }

    /* set the receive descriptor list address register */
    ptr->DMA_RX_DESC_LIST_ADDR = (uint32_t)desc->rx_desc_list_head;
}
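
/*
 * Descriptor/buffer wiring sketch for the two chain-init helpers above
 * (illustrative): enet_controller_init() invokes them through enet_dma_init(),
 * so the application only fills the enet_desc_t. Both descriptor tables and
 * buffers must live in memory the DMA can reach, typically non-cacheable or
 * with explicit cache maintenance; the names and counts are assumptions.
 *
 *     #define RX_CNT 4
 *     #define TX_CNT 4
 *     #define BUF_SZ 1536
 *
 *     static enet_rx_desc_t rx_desc_tab[RX_CNT];
 *     static enet_tx_desc_t tx_desc_tab[TX_CNT];
 *     static uint8_t rx_buf[RX_CNT][BUF_SZ];
 *     static uint8_t tx_buf[TX_CNT][BUF_SZ];
 *
 *     desc.rx_desc_list_head = rx_desc_tab;
 *     desc.tx_desc_list_head = tx_desc_tab;
 *     desc.rx_buff_cfg.buffer = (uint32_t)rx_buf;
 *     desc.rx_buff_cfg.count  = RX_CNT;
 *     desc.rx_buff_cfg.size   = BUF_SZ;
 *     desc.tx_buff_cfg.buffer = (uint32_t)tx_buf;
 *     desc.tx_buff_cfg.count  = TX_CNT;
 *     desc.tx_buff_cfg.size   = BUF_SZ;
 */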

void enet_timestamp_enable(ENET_Type *ptr, bool enable)
{
    /* enable or disable timestamping */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSENA_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSENA_SET(enable);
}

void enet_set_subsecond_increment(ENET_Type *ptr, uint8_t ssinc)
{
    ptr->SUB_SEC_INCR &= ~ENET_SUB_SEC_INCR_SSINC_MASK;
    ptr->SUB_SEC_INCR |= ENET_SUB_SEC_INCR_SSINC_SET(ssinc);
}

void enet_set_ptp_timestamp(ENET_Type *ptr, enet_ptp_ts_update_t *timestamp)
{
    ptr->SYST_SEC_UPD = timestamp->sec;
    ptr->SYST_NSEC_UPD = timestamp->nsec;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSINIT_MASK;

    /* wait for the initialization to finish */
    while (ENET_TS_CTRL_TSINIT_GET(ptr->TS_CTRL) == 1) {
    }
}

void enet_get_ptp_timestamp(ENET_Type *ptr, enet_ptp_ts_system_t *timestamp)
{
    timestamp->sec = ptr->SYST_SEC;
    timestamp->nsec = ptr->SYST_NSEC;
}

void enet_update_ptp_timeoffset(ENET_Type *ptr, enet_ptp_ts_update_t *timeoffset)
{
    /* write the offset (positive or negative) into the timestamp update high and low registers */
    ptr->SYST_SEC_UPD = ENET_SYST_SEC_UPD_TSS_SET(timeoffset->sec);
    ptr->SYST_NSEC_UPD = ENET_SYST_NSEC_UPD_ADDSUB_SET(timeoffset->sign) | ENET_SYST_NSEC_UPD_TSSS_SET(timeoffset->nsec);

    /* update the timestamp */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSUPDT_MASK;

    /* wait for the update to finish */
    while (ENET_TS_CTRL_TSUPDT_GET(ptr->TS_CTRL)) {
    }
}

void enet_adjust_ptp_time_freq(ENET_Type *ptr, int32_t adj)
{
    /* scale the base addend by the requested drift in parts per billion */
    ptr->TS_ADDEND = (uint32_t)((int64_t)adj * ENET_ADJ_FREQ_BASE_ADDEND / (ENET_ONE_SEC_IN_NANOSEC - adj) + ENET_ADJ_FREQ_BASE_ADDEND);

    ptr->TS_CTRL |= ENET_TS_CTRL_TSADDREG_MASK;

    /* wait for the addend update to finish */
    while (ENET_TS_CTRL_TSADDREG_GET(ptr->TS_CTRL)) {
    }
}
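
/*
 * Worked example for enet_adjust_ptp_time_freq(): "adj" is the desired rate
 * correction in parts per billion. The new addend evaluates to
 * ENET_ADJ_FREQ_BASE_ADDEND * 1e9 / (1e9 - adj), so a positive adj speeds
 * the clock up by roughly adj ppb and a negative adj slows it down.
 *
 *     enet_adjust_ptp_time_freq(HPM_ENET0, 100);   // run ~100 ppb faster
 *     enet_adjust_ptp_time_freq(HPM_ENET0, -250);  // run ~250 ppb slower
 */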

void enet_set_ptp_version(ENET_Type *ptr, enet_ptp_version_t ptp_ver)
{
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSVER2ENA_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSVER2ENA_SET(ptp_ver);
}

hpm_stat_t enet_enable_ptp_frame_type(ENET_Type *ptr, enet_ptp_frame_type_t ptp_frame_type, bool enable)
{
    hpm_stat_t stat = status_success;

    if (ptp_frame_type == enet_ptp_frame_ipv4) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPV4ENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPV4ENA_SET(enable);
    } else if (ptp_frame_type == enet_ptp_frame_ipv6) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPV6ENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPV6ENA_SET(enable);
    } else if (ptp_frame_type == enet_ptp_frame_ethernet) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPENA_SET(enable);
    } else {
        return status_invalid_argument;
    }

    return stat;
}

void enet_set_snapshot_ptp_message_type(ENET_Type *ptr, enet_ts_ss_ptp_msg_t ts_ss_ptp_msg)
{
    /* set the ptp message type for snapshots */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_SNAPTYPSEL_MASK;
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSMSTRENA_MASK;
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSEVNTENA_MASK;
    ptr->TS_CTRL |= ts_ss_ptp_msg << ENET_TS_CTRL_TSEVNTENA_SHIFT;
}
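
/*
 * Filtering sketch (illustrative): timestamp only PTPv2 event messages
 * carried directly over Ethernet. The enum values below are assumptions
 * drawn from the driver header; adapt them to the actual definitions.
 *
 *     enet_set_ptp_version(HPM_ENET0, enet_ptp_v2);
 *     enet_enable_ptp_frame_type(HPM_ENET0, enet_ptp_frame_ethernet, true);
 *     enet_set_snapshot_ptp_message_type(HPM_ENET0, enet_ts_ss_ptp_msg_0);
 */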

void enet_init_ptp(ENET_Type *ptr, enet_ptp_config_t *config)
{
    enet_mask_interrupt_event(ptr, ENET_INTR_STATUS_TSIS_MASK);

    /* select the nanosecond resolution (timestamp rollover mode) */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSCTRLSSR_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSCTRLSSR_SET(config->timestamp_rollover_mode);

    /* enable timestamping */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSENALL_MASK | ENET_TS_CTRL_TSENA_MASK;

    /* set the sub-second increment */
    ptr->SUB_SEC_INCR &= ~ENET_SUB_SEC_INCR_SSINC_MASK;
    ptr->SUB_SEC_INCR |= ENET_SUB_SEC_INCR_SSINC_SET(config->ssinc);

    if (config->update_method == enet_ptp_time_fine_update) {
        /* set the addend */
        ptr->TS_ADDEND = config->addend;

        /* update the addend */
        ptr->TS_CTRL |= ENET_TS_CTRL_TSADDREG_MASK;

        /* poll until the addend update completes */
        while (ENET_TS_CTRL_TSADDREG_GET(ptr->TS_CTRL)) {
        }

        /* fine update */
        ptr->TS_CTRL |= ENET_TS_CTRL_TSCFUPDT_MASK;
    } else {
        /* coarse update */
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSCFUPDT_MASK;
    }
}
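
/*
 * PTP bring-up sketch (illustrative; a 100 MHz PTP reference clock and the
 * enum names are assumptions). With the fine update method, pick ssinc in
 * nanoseconds and derive the addend as 2^32 * 1e9 / (ssinc * f_ptp):
 * ssinc = 20 ns and f_ptp = 100 MHz give 2^32 / 2 = 0x80000000.
 *
 *     enet_ptp_config_t ptp_cfg;
 *     ptp_cfg.timestamp_rollover_mode = enet_ts_dig_rollover_control;  // 1 ns resolution (assumed name)
 *     ptp_cfg.update_method = enet_ptp_time_fine_update;
 *     ptp_cfg.ssinc = 20;                  // 20 ns per sub-second increment
 *     ptp_cfg.addend = 0x80000000U;
 *     enet_init_ptp(HPM_ENET0, &ptp_cfg);
 *
 *     enet_ptp_ts_update_t t0 = { .sec = 0, .nsec = 0 };
 *     enet_set_ptp_timestamp(HPM_ENET0, &t0);
 */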

void enet_set_pps0_control_output(ENET_Type *ptr, enet_pps_ctrl_t freq)
{
    /* clear PPSEN0 to select the fixed-frequency pps output, then set its frequency */
    ptr->PPS_CTRL &= ~ENET_PPS_CTRL_PPSEN0_MASK;
    ptr->PPS_CTRL |= ENET_PPS_CTRL_PPSCTRLCMD0_SET(freq);
}

hpm_stat_t enet_set_ppsx_command(ENET_Type *ptr, enet_pps_cmd_t cmd, enet_pps_idx_t idx)
{
    if (idx >= ENET_SOC_PPS_MAX_COUNT) {
        return status_invalid_argument;
    }

    /* Wait for the last command to complete */
    while (ptr->PPS_CTRL & (ENET_PPS_CMD_MASK << ((idx + 1) << ENET_PPS_CMD_OFS_FAC))) {
    }

    /* Set the specified pps output with the specified command */
    ptr->PPS_CTRL |= cmd << ((idx + 1) << ENET_PPS_CMD_OFS_FAC);

    return status_success;
}

hpm_stat_t enet_set_ppsx_config(ENET_Type *ptr, enet_pps_cmd_config_t *cmd_cfg, enet_pps_idx_t idx)
{
    if (idx >= ENET_SOC_PPS_MAX_COUNT) {
        return status_invalid_argument;
    }

    /* Set the interval and width for PPSx */
    if (idx == enet_pps_0) {
        ptr->PPS0_INTERVAL = cmd_cfg->pps_interval - 1;
        ptr->PPS0_WIDTH = cmd_cfg->pps_width - 1;
    } else {
        ptr->PPS[idx].INTERVAL = cmd_cfg->pps_interval - 1;
        ptr->PPS[idx].WIDTH = cmd_cfg->pps_width - 1;
    }

    /* Set the target timestamp */
    if (idx == enet_pps_0) {
        ptr->TGTTM_SEC = cmd_cfg->target_sec;
        ptr->TGTTM_NSEC = cmd_cfg->target_nsec;
    } else {
        ptr->PPS[idx].TGTTM_SEC = cmd_cfg->target_sec;
        ptr->PPS[idx].TGTTM_NSEC = cmd_cfg->target_nsec;
    }

    /* Set PPS0 as the command function */
    if (idx == enet_pps_0) {
        ptr->PPS_CTRL |= ENET_PPS_CTRL_PPSEN0_MASK;
    }

#if ENET_SOC_PPS1_EN
    if (idx == enet_pps_1) {
        ptr->PPS_CTRL |= ENET_PPS_CTRL_PPSEN1_MASK;
    }
#endif

    /* Wait for the last command to complete */
    while (ptr->PPS_CTRL & (ENET_PPS_CMD_MASK << ((idx + 1) << ENET_PPS_CMD_OFS_FAC))) {
    }

    /* Initialize with the No Command value */
    ptr->PPS_CTRL &= ~(ENET_PPS_CMD_MASK << ((idx + 1) << ENET_PPS_CMD_OFS_FAC));

    return status_success;
}
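
/*
 * PPS0 command sketch (illustrative): start a pulse train at an absolute
 * PTP time. Interval and width are counted in units of the sub-second
 * increment (20 ns in the PTP sketch above); the command enum value is an
 * assumption drawn from the driver header.
 *
 *     enet_pps_cmd_config_t pps = {0};
 *     pps.target_sec   = 10;          // start at t = 10 s
 *     pps.target_nsec  = 0;
 *     pps.pps_interval = 50000000;    // 1 s period at 20 ns per unit
 *     pps.pps_width    = 5000000;     // 100 ms high time
 *
 *     enet_set_ppsx_config(HPM_ENET0, &pps, enet_pps_0);
 *     enet_set_ppsx_command(HPM_ENET0, enet_pps_cmd_start_single_pulse, enet_pps_0);
 */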