/*
 * Copyright (c) 2021-2023 HPMicro
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

/*---------------------------------------------------------------------
 * Includes
 *---------------------------------------------------------------------
 */
#include "hpm_enet_drv.h"
#include "hpm_enet_soc_drv.h"

/*---------------------------------------------------------------------
 * Internal API
 *---------------------------------------------------------------------
 */
static void enet_mode_init(ENET_Type *ptr, uint32_t intr)
{
    /* receive and transmit store and forward */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_RSF_MASK | ENET_DMA_OP_MODE_TSF_MASK;

    /* enable hardware flow control */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_EFC_MASK;

    /* enable error frame forwarding */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_FEF_MASK;

    /* disable osf mode */
    ptr->DMA_OP_MODE &= ~ENET_DMA_OP_MODE_OSF_MASK;

    ptr->DMA_INTR_EN |= intr;

    /* wait until the AXI read and write channels are idle */
    while (ENET_DMA_BUS_STATUS_AXIRDSTS_GET(ptr->DMA_BUS_STATUS) || ENET_DMA_BUS_STATUS_AXWHSTS_GET(ptr->DMA_BUS_STATUS)) {
    }

    /* start the receive and transmit dma */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_ST_MASK | ENET_DMA_OP_MODE_SR_MASK;
}

static int enet_dma_init(ENET_Type *ptr, enet_desc_t *desc, uint32_t intr, uint8_t pbl)
{
    /* generate a software reset */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_SWR_MASK;

    /* wait for the reset process to complete */
    while (ENET_DMA_BUS_MODE_SWR_GET(ptr->DMA_BUS_MODE)) {
    }

    /* initialize the bus mode register */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_AAL_MASK;

    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_FB_MASK;

    /* enable pblx8 mode */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_PBLX8_MASK;

    /* set the programmable burst length: clear the PBL field, then program it */
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_PBL_MASK;
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_PBL_SET(pbl);

    /* disable separate pbl */
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_USP_MASK;

    /* descriptor length */
#if ENET_SOC_ALT_EHD_DES_LEN == ENET_SOC_ALT_EHD_DES_MIN_LEN
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_ATDS_MASK;
#elif ENET_SOC_ALT_EHD_DES_LEN == ENET_SOC_ALT_EHD_DES_MAX_LEN
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_ATDS_MASK;
#endif

    /* set the maximum enabled burst length */
    if (ENET_DMA_BUS_MODE_FB_GET(ptr->DMA_BUS_MODE) == 0) {
        ptr->DMA_AXI_MODE |= ENET_DMA_AXI_MODE_BLEN4_MASK | ENET_DMA_AXI_MODE_BLEN8_MASK | ENET_DMA_AXI_MODE_BLEN16_MASK;
    } else {
        /* TODO: set BLENX_MASK */
    }

    /* initialize the Tx descriptor list: chain mode */
    enet_dma_tx_desc_chain_init(ptr, desc);

    /* initialize the Rx descriptor list: chain mode */
    enet_dma_rx_desc_chain_init(ptr, desc);

    enet_mode_init(ptr, intr);

    enet_dma_flush(ptr);

    return true;
}

static int enet_mac_init(ENET_Type *ptr, enet_mac_config_t *config, enet_inf_type_t inf_type)
{
    for (int i = 0; i < config->valid_max_count; i++) {
        if (i == 0) {
            ptr->MAC_ADDR_0_HIGH &= ~ENET_MAC_ADDR_0_HIGH_ADDRHI_MASK;
            ptr->MAC_ADDR_0_LOW  &= ~ENET_MAC_ADDR_0_LOW_ADDRLO_MASK;
            ptr->MAC_ADDR_0_HIGH |= ENET_MAC_ADDR_0_HIGH_ADDRHI_SET(config->mac_addr_high[i]);
            ptr->MAC_ADDR_0_LOW  |= ENET_MAC_ADDR_0_LOW_ADDRLO_SET(config->mac_addr_low[i]);
        } else {
            ptr->MAC_ADDR[i-1].HIGH &= ~ENET_MAC_ADDR_HIGH_ADDRHI_MASK;
            ptr->MAC_ADDR[i-1].LOW  &= ~ENET_MAC_ADDR_LOW_ADDRLO_MASK;
            ptr->MAC_ADDR[i-1].HIGH |= ENET_MAC_ADDR_HIGH_AE_MASK | ENET_MAC_ADDR_HIGH_ADDRHI_SET(config->mac_addr_high[i]);
            ptr->MAC_ADDR[i-1].LOW  |= ENET_MAC_ADDR_LOW_ADDRLO_SET(config->mac_addr_low[i]);
        }
    }

    /* set the appropriate filters for the incoming frames */
    ptr->MACFF |= ENET_MACFF_RA_SET(1);      /* receive all */

    /* replace the content of mac address 0 in the sa field of all transmitted frames */
    ptr->MACCFG &= ~ENET_MACCFG_SARC_MASK;
    ptr->MACCFG |= ENET_MACCFG_SARC_SET(config->sarc);

    /* default to the MII/RMII port at 100 Mbps before resolving the interface type */
    ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;

    if (inf_type == enet_inf_rgmii) {
        ptr->MACCFG &= ~ENET_MACCFG_PS_MASK;
    } else if (inf_type == enet_inf_rmii) {
        ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;
    } else {
        return status_invalid_argument;
    }

    /* enable full-duplex mode */
    ptr->MACCFG |= ENET_MACCFG_DM_MASK;

    /* set the inter-frame gap according to the duplex mode */
    if (ENET_MACCFG_DM_GET(ptr->MACCFG) == 0) {
        ptr->MACCFG |= ENET_MACCFG_IFG_SET(4);
    } else {
        ptr->MACCFG |= ENET_MACCFG_IFG_SET(2);
    }

    /* enable the transmitter and the receiver */
    ptr->MACCFG |= ENET_MACCFG_TE_MASK | ENET_MACCFG_RE_MASK;

    return true;
}

static void enet_mask_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    /* mask the specified interrupts */
    ptr->INTR_MASK |= mask;
}

/*---------------------------------------------------------------------
 * Driver API
 *---------------------------------------------------------------------
 */
uint32_t enet_get_interrupt_status(ENET_Type *ptr)
{
    return ptr->INTR_STATUS;
}

void enet_mask_mmc_rx_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    ptr->MMC_INTR_MASK_RX |= mask;
}

uint32_t enet_get_mmc_rx_interrupt_status(ENET_Type *ptr)
{
    return ptr->MMC_INTR_RX;
}

void enet_mask_mmc_tx_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    ptr->MMC_INTR_MASK_TX |= mask;
}

uint32_t enet_get_mmc_tx_interrupt_status(ENET_Type *ptr)
{
    return ptr->MMC_INTR_TX;
}

void enet_dma_flush(ENET_Type *ptr)
{
    /* flush the DMA transmit FIFO and wait until the flush completes */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_FTF_MASK;
    while (ENET_DMA_OP_MODE_FTF_GET(ptr->DMA_OP_MODE)) {
    }
}

void enet_write_phy(ENET_Type *ptr, uint32_t phy_addr, uint32_t addr, uint32_t data)
{
    /* set the data to be written */
    ptr->GMII_DATA = ENET_GMII_DATA_GD_SET(data);

    /* set the phy address, register address, write operation and busy flag */
    ptr->GMII_ADDR = ENET_GMII_ADDR_PA_SET(phy_addr)
                   | ENET_GMII_ADDR_GR_SET(addr)
                   | ENET_GMII_ADDR_CR_SET(enet_csr_150m_to_250m_mdc_csr_div_102)
                   | ENET_GMII_ADDR_GW_SET(enet_phy_op_write)
                   | ENET_GMII_ADDR_GB_SET(enet_gmii_busy);

    /* wait until the write operation is completed */
    while (ENET_GMII_ADDR_GB_GET(ptr->GMII_ADDR)) {
    }
}

uint16_t enet_read_phy(ENET_Type *ptr, uint32_t phy_addr, uint32_t addr)
{
    /* set the phy address, register address, read operation and busy flag */
    ptr->GMII_ADDR = ENET_GMII_ADDR_PA_SET(phy_addr)
                   | ENET_GMII_ADDR_GR_SET(addr)
                   | ENET_GMII_ADDR_CR_SET(enet_csr_150m_to_250m_mdc_csr_div_102)
                   | ENET_GMII_ADDR_GW_SET(enet_phy_op_read)
                   | ENET_GMII_ADDR_GB_SET(enet_gmii_busy);

    /* wait until the read operation is completed */
    while (ENET_GMII_ADDR_GB_GET(ptr->GMII_ADDR)) {
    }

    /* read and return the data */
    return (uint16_t)ENET_GMII_DATA_GD_GET(ptr->GMII_DATA);
}

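/*
 * Usage sketch (illustrative, not part of the driver): read a PHY identifier
 * over MDIO. Register numbers 2 and 3 are the IEEE 802.3 PHYID1/PHYID2
 * offsets; the controller instance HPM_ENET0 and PHY address 0 are
 * board-specific assumptions.
 *
 *     uint32_t phy_id = ((uint32_t)enet_read_phy(HPM_ENET0, 0, 2) << 16)
 *                     | enet_read_phy(HPM_ENET0, 0, 3);
 */
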
void enet_set_line_speed(ENET_Type *ptr, enet_line_speed_t speed)
{
    ptr->MACCFG &= ~(ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK);
    ptr->MACCFG |= speed << ENET_MACCFG_FES_SHIFT;
}

void enet_set_duplex_mode(ENET_Type *ptr, enet_duplex_mode_t mode)
{
    ptr->MACCFG &= ~ENET_MACCFG_DM_MASK;
    ptr->MACCFG |= ENET_MACCFG_DM_SET(mode);
}

hpm_stat_t enet_controller_init(ENET_Type *ptr, enet_inf_type_t inf_type, enet_desc_t *desc, enet_mac_config_t *config, enet_int_config_t *int_config)
{
    /* select an interface */
    enet_intf_selection(ptr, inf_type);

    /* initialize DMA */
    enet_dma_init(ptr, desc, int_config->int_enable, config->dma_pbl);

    /* initialize MAC */
    enet_mac_init(ptr, config, inf_type);

    /* mask the specified interrupts */
    enet_mask_interrupt_event(ptr, int_config->int_mask);

    /* mask the mmc rx interrupts */
    enet_mask_mmc_rx_interrupt_event(ptr, int_config->mmc_intr_mask_rx);

    /* mask the mmc tx interrupts */
    enet_mask_mmc_tx_interrupt_event(ptr, int_config->mmc_intr_mask_tx);

    return status_success;
}

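/*
 * Usage sketch (illustrative): fill the descriptor and configuration structs,
 * then call enet_controller_init() once during bring-up. Buffer counts,
 * sizes, alignment and the RGMII choice below are application assumptions;
 * struct field names follow their use elsewhere in this file.
 *
 *     enet_desc_t desc = {
 *         .tx_desc_list_head = tx_desc_tab,
 *         .rx_desc_list_head = rx_desc_tab,
 *         .tx_buff_cfg = { .buffer = (uint32_t)tx_buff, .count = 4, .size = 1536 },
 *         .rx_buff_cfg = { .buffer = (uint32_t)rx_buff, .count = 4, .size = 1536 },
 *     };
 *     enet_controller_init(HPM_ENET0, enet_inf_rgmii, &desc, &mac_config, &int_config);
 */
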
/*****************************************************************************
 *                           DMA API
 ****************************************************************************/
void enet_rx_resume(ENET_Type *ptr)
{
    /* if reception is suspended because no rx descriptor was available,
     * clear the status and issue a receive poll demand to restart it */
    if (ENET_DMA_STATUS_RU_GET(ptr->DMA_STATUS)) {
        ptr->DMA_STATUS = ENET_DMA_STATUS_RU_MASK;
        ptr->DMA_RX_POLL_DEMAND = 1;
    }
}

uint32_t enet_check_received_frame(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info)
{
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* check if this descriptor holds the last segment */
    if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
        (rx_desc_list_cur->rdes0_bm.ls == 1)) {
        rx_frame_info->seg_count++;
        if (rx_frame_info->seg_count == 1) {
            rx_frame_info->fs_rx_desc = rx_desc_list_cur;
        }
        rx_frame_info->ls_rx_desc = rx_desc_list_cur;
        return 1;
    }
    /* check if this descriptor holds the first segment */
    else if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
             (rx_desc_list_cur->rdes0_bm.fs == 1) &&
             (rx_desc_list_cur->rdes0_bm.ls == 0)) {
        rx_frame_info->fs_rx_desc = rx_desc_list_cur;
        rx_frame_info->ls_rx_desc = NULL;
        rx_frame_info->seg_count = 1;
        rx_desc_list_cur = (enet_rx_desc_t *)rx_desc_list_cur->rdes3_bm.next_desc;
        *parent_rx_desc_list_cur = rx_desc_list_cur;
    }
    /* check if this descriptor holds an intermediate segment */
    else if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
             (rx_desc_list_cur->rdes0_bm.fs == 0) &&
             (rx_desc_list_cur->rdes0_bm.ls == 0)) {
        rx_frame_info->seg_count++;
        rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
        *parent_rx_desc_list_cur = rx_desc_list_cur;
    }

    return 0;
}

enet_frame_t enet_get_received_frame(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info)
{
    uint32_t frame_length = 0;
    enet_frame_t frame = {0, 0, 0};
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* get the frame length of the received packet: subtract the 4 CRC bytes */
    frame_length = rx_desc_list_cur->rdes0_bm.fl - 4;
    frame.length = frame_length;

    /* get the address of the first frame descriptor and the buffer start address */
    frame.rx_desc = rx_frame_info->fs_rx_desc;
    frame.buffer = rx_frame_info->fs_rx_desc->rdes2_bm.buffer1;

    /* chained mode: update the Ethernet dma global Rx descriptor with the next
     * Rx descriptor, which selects the next buffer to read */
    rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
    *parent_rx_desc_list_cur = rx_desc_list_cur;

    return frame;
}

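/*
 * Usage sketch (illustrative): a polled receive path scans descriptors with
 * enet_check_received_frame() until a complete frame is reported, fetches it
 * with enet_get_received_frame(), and finally returns ownership to the DMA.
 * The single-descriptor ownership return below assumes an unsegmented frame;
 * process() is an application-side placeholder.
 *
 *     static enet_rx_frame_info_t rx_frame_info;
 *     if (enet_check_received_frame(&desc.rx_desc_list_cur, &rx_frame_info) == 1) {
 *         enet_frame_t frame = enet_get_received_frame(&desc.rx_desc_list_cur, &rx_frame_info);
 *         process((void *)frame.buffer, frame.length);
 *         frame.rx_desc->rdes0_bm.own = 1;
 *     }
 */
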
enet_frame_t enet_get_received_frame_interrupt(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info, uint32_t rx_desc_count)
{
    enet_frame_t frame = {0, 0, 0};
    uint32_t desc_scan_counter = 0;
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* scan descriptors owned by the CPU */
    while ((rx_desc_list_cur->rdes0_bm.own == 0) &&
        (desc_scan_counter < rx_desc_count)) {

        desc_scan_counter++;

        /* check if this is the first segment in the frame */
        if ((rx_desc_list_cur->rdes0_bm.fs == 1) &&
            (rx_desc_list_cur->rdes0_bm.ls == 0)) {
            rx_frame_info->fs_rx_desc = rx_desc_list_cur;
            rx_frame_info->seg_count = 1;
            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;
        }

        /* check if this is an intermediate segment */
        else if ((rx_desc_list_cur->rdes0_bm.ls == 0) &&
                 (rx_desc_list_cur->rdes0_bm.fs == 0)) {
            rx_frame_info->seg_count++;
            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;
        }

        /* otherwise, this is the last segment */
        else {
            rx_frame_info->ls_rx_desc = rx_desc_list_cur;

            rx_frame_info->seg_count++;

            /* the first segment is also the last segment */
            if (rx_frame_info->seg_count == 1) {
                rx_frame_info->fs_rx_desc = rx_desc_list_cur;
            }

            /* get the frame length of the received packet: subtract the 4 CRC bytes */
            frame.length = rx_desc_list_cur->rdes0_bm.fl - 4;

            /* get the buffer start address:
             * check if more than one segment is in the frame */
            if (rx_frame_info->seg_count > 1) {
                frame.buffer = rx_frame_info->fs_rx_desc->rdes2_bm.buffer1;
            } else {
                frame.buffer = rx_desc_list_cur->rdes2_bm.buffer1;
            }

            frame.rx_desc = rx_frame_info->fs_rx_desc;

            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;

            return frame;
        }
    }

    return frame;
}

void enet_get_default_tx_control_config(ENET_Type *ptr, enet_tx_control_config_t *config)
{
    (void) ptr;
    config->enable_ioc  = false;
    config->disable_crc = true;
    config->disable_pad = false;
    config->enable_ttse = false;
    config->enable_crcr = true;
    config->cic         = enet_cic_ip_pseudoheader;
    config->vlic        = enet_vlic_disable;
    config->saic        = enet_saic_disable;
}

uint32_t enet_prepare_tx_desc_with_ts_record(ENET_Type *ptr,
                                             enet_tx_desc_t **parent_tx_desc_list_cur,
                                             enet_tx_control_config_t *config,
                                             uint16_t frame_length, uint16_t tx_buff_size,
                                             enet_ptp_ts_system_t *timestamp)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    uint32_t retry_cnt = ENET_RETRY_CNT;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }

    /* check if the descriptor is owned by the Ethernet DMA (when set) or CPU (when reset) */
    dma_tx_desc = tx_desc_list_cur;

    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set the first and the last segment */
        dma_tx_desc->tdes0_bm.own  = 0;
        dma_tx_desc->tdes0_bm.fs   = 1;
        dma_tx_desc->tdes0_bm.ls   = 1;
        dma_tx_desc->tdes0_bm.ic   = config->enable_ioc;
        dma_tx_desc->tdes0_bm.dc   = config->disable_crc;
        dma_tx_desc->tdes0_bm.dp   = config->disable_pad;
        dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
        dma_tx_desc->tdes0_bm.cic  = config->cic;
        dma_tx_desc->tdes0_bm.vlic = config->vlic;
        dma_tx_desc->tdes0_bm.ttse = config->enable_ttse;
        dma_tx_desc->tdes1_bm.saic = config->saic;
        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);
        /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0_bm.own = 1;
        ptr->DMA_TX_POLL_DEMAND = 1;

        if (dma_tx_desc->tdes0_bm.ttse == true) {
            /* wait until the DMA clears the own bit and records the transmit timestamp;
             * pre-decrement keeps the unsigned counter from wrapping past zero */
            do {
            } while (dma_tx_desc->tdes0_bm.own == 1 && --retry_cnt > 0);

            if (retry_cnt == 0) {
                return ENET_ERROR;
            }

            timestamp->sec  = dma_tx_desc->tdes7_bm.ttsh;
            timestamp->nsec = dma_tx_desc->tdes6_bm.ttsl;
        }

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        for (i = 0; i < buf_count; i++) {
            /* get the next available tx descriptor */
            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);

            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
                dma_tx_desc->tdes0_bm.dc   = config->disable_crc;
                dma_tx_desc->tdes0_bm.dp   = config->disable_pad;
                dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
                dma_tx_desc->tdes0_bm.cic  = config->cic;
                dma_tx_desc->tdes0_bm.vlic = config->vlic;
                dma_tx_desc->tdes0_bm.ttse = config->enable_ttse;
                dma_tx_desc->tdes1_bm.saic = config->saic;

                if (dma_tx_desc->tdes0_bm.ttse == true) {
                    do {
                    } while (dma_tx_desc->tdes0_bm.own == 1 && --retry_cnt > 0);

                    if (retry_cnt == 0) {
                        return ENET_ERROR;
                    }

                    timestamp->sec  = dma_tx_desc->tdes7_bm.ttsh;
                    timestamp->nsec = dma_tx_desc->tdes6_bm.ttsl;
                }
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit */
                dma_tx_desc->tdes0_bm.ls = 1;
                dma_tx_desc->tdes0_bm.ic = config->enable_ioc;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);

                /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
                dma_tx_desc->tdes0_bm.own = 1;
                ptr->DMA_TX_POLL_DEMAND = 1;
            }
        }
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}

uint32_t enet_prepare_tx_desc(ENET_Type *ptr, enet_tx_desc_t **parent_tx_desc_list_cur, enet_tx_control_config_t *config, uint16_t frame_length, uint16_t tx_buff_size)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }
    /* check if the descriptor is owned by the Ethernet DMA (when set) or CPU (when reset) */
    dma_tx_desc = tx_desc_list_cur;
    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set the first and the last segment */
        dma_tx_desc->tdes0_bm.own  = 0;
        dma_tx_desc->tdes0_bm.fs   = 1;
        dma_tx_desc->tdes0_bm.ls   = 1;
        dma_tx_desc->tdes0_bm.ic   = config->enable_ioc;
        dma_tx_desc->tdes0_bm.dc   = config->disable_crc;
        dma_tx_desc->tdes0_bm.dp   = config->disable_pad;
        dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
        dma_tx_desc->tdes0_bm.cic  = config->cic;
        dma_tx_desc->tdes0_bm.vlic = config->vlic;
        dma_tx_desc->tdes1_bm.saic = config->saic;
        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);
        /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0_bm.own = 1;
        ptr->DMA_TX_POLL_DEMAND = 1;

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        for (i = 0; i < buf_count; i++) {
            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
                dma_tx_desc->tdes0_bm.dc   = config->disable_crc;
                dma_tx_desc->tdes0_bm.dp   = config->disable_pad;
                dma_tx_desc->tdes0_bm.crcr = config->enable_crcr;
                dma_tx_desc->tdes0_bm.cic  = config->cic;
                dma_tx_desc->tdes0_bm.vlic = config->vlic;
                dma_tx_desc->tdes1_bm.saic = config->saic;
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit */
                dma_tx_desc->tdes0_bm.ls = 1;
                dma_tx_desc->tdes0_bm.ic = config->enable_ioc;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);

                /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
                dma_tx_desc->tdes0_bm.own = 1;
                ptr->DMA_TX_POLL_DEMAND = 1;
            }

            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
        }
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}

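/*
 * Usage sketch (illustrative): to transmit, copy the frame into the buffer of
 * the current Tx descriptor, then hand the descriptor chain back to the DMA.
 * frame_data/frame_len and the single-buffer memcpy are application-side
 * assumptions.
 *
 *     enet_tx_control_config_t tx_ctl;
 *     enet_get_default_tx_control_config(HPM_ENET0, &tx_ctl);
 *     memcpy((void *)desc.tx_desc_list_cur->tdes2_bm.buffer1, frame_data, frame_len);
 *     enet_prepare_tx_desc(HPM_ENET0, &desc.tx_desc_list_cur, &tx_ctl, frame_len, desc.tx_buff_cfg.size);
 */
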
uint32_t enet_prepare_transmission_descriptors(ENET_Type *ptr, enet_tx_desc_t **parent_tx_desc_list_cur, uint16_t frame_length, uint16_t tx_buff_size)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }
    /* check if the descriptor is owned by the Ethernet DMA (when set) or CPU (when reset) */
    dma_tx_desc = tx_desc_list_cur;
    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set the first and the last segment */
        dma_tx_desc->tdes0_bm.own = 0;
        dma_tx_desc->tdes0_bm.ic = 0;
        dma_tx_desc->tdes0_bm.fs = 1;
        dma_tx_desc->tdes0_bm.ls = 1;
        dma_tx_desc->tdes0_bm.dc = 1;
        dma_tx_desc->tdes0_bm.dp = 0;
        dma_tx_desc->tdes0_bm.crcr = 1;
        dma_tx_desc->tdes0_bm.cic = 3;   /* insert IP header and payload checksums, including the pseudo-header */
        dma_tx_desc->tdes1_bm.saic = 2;  /* replace the source address with mac address 0 */

        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);
        /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0_bm.own = 1;
        ptr->DMA_TX_POLL_DEMAND = 1;

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        for (i = 0; i < buf_count; i++) {
            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit */
                dma_tx_desc->tdes0_bm.ls = 1;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);

                /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
                dma_tx_desc->tdes0_bm.own = 1;
                ptr->DMA_TX_POLL_DEMAND = 1;
            }

            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
        }
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}

void enet_dma_tx_desc_chain_init(ENET_Type *ptr, enet_desc_t *desc)
{
    uint32_t i = 0;
    enet_tx_desc_t *dma_tx_desc;

    /* set the tx_desc_list_cur pointer to the first descriptor of the dma_tx_desc_tab list */
    desc->tx_desc_list_cur = desc->tx_desc_list_head;

    /* fill each dma_tx_desc descriptor with the right values */
    for (i = 0; i < desc->tx_buff_cfg.count; i++) {

        /* get the pointer to the ith member of the Tx desc list */
        dma_tx_desc = desc->tx_desc_list_head + i;

        /* set the second address chained bit */
        dma_tx_desc->tdes0_bm.tch = 1;

        /* set the buffer 1 address pointer */
        dma_tx_desc->tdes2_bm.buffer1 = (uint32_t)(&((uint8_t *)desc->tx_buff_cfg.buffer)[i * desc->tx_buff_cfg.size]);

        /* link all Tx descriptors */
        if (i < desc->tx_buff_cfg.count - 1) {
            /* set the next descriptor address register to the next descriptor base address */
            dma_tx_desc->tdes3_bm.next_desc = (uint32_t)(desc->tx_desc_list_head + i + 1);
        } else {
            /* for the last descriptor, set the next descriptor address register to the first descriptor base address */
            dma_tx_desc->tdes3_bm.next_desc = (uint32_t)desc->tx_desc_list_head;
        }
    }

    /* set the transmit descriptor list address register */
    ptr->DMA_TX_DESC_LIST_ADDR = (uint32_t)desc->tx_desc_list_head;
}

void enet_dma_rx_desc_chain_init(ENET_Type *ptr, enet_desc_t *desc)
{
    uint32_t i = 0;
    enet_rx_desc_t *dma_rx_desc;

    /* set the rx_desc_list_cur pointer to the first descriptor of the dma_rx_desc_tab list */
    desc->rx_desc_list_cur = desc->rx_desc_list_head;
    /* fill each dma_rx_desc descriptor with the right values */
    for (i = 0; i < desc->rx_buff_cfg.count; i++) {
        /* get the pointer to the ith member of the Rx desc list */
        dma_rx_desc = desc->rx_desc_list_head + i;
        /* set the own bit of the rx descriptor status */
        dma_rx_desc->rdes0_bm.own = 1;

        /* set the buffer 1 size and the second address chained bit */
        dma_rx_desc->rdes1_bm.rch = 1;
        dma_rx_desc->rdes1_bm.rbs1 = desc->rx_buff_cfg.size;

        /* set the buffer 1 address pointer */
        dma_rx_desc->rdes2_bm.buffer1 = (uint32_t)(&((uint8_t *)desc->rx_buff_cfg.buffer)[i * desc->rx_buff_cfg.size]);

        /* link all Rx descriptors */
        if (i < desc->rx_buff_cfg.count - 1) {
            /* set the next descriptor address register to the next descriptor base address */
            dma_rx_desc->rdes3_bm.next_desc = (uint32_t)(desc->rx_desc_list_head + i + 1);
        } else {
            /* for the last descriptor, set the next descriptor address register to the first descriptor base address */
            dma_rx_desc->rdes3_bm.next_desc = (uint32_t)desc->rx_desc_list_head;
        }
    }
    /* set the receive descriptor list address register */
    ptr->DMA_RX_DESC_LIST_ADDR = (uint32_t)desc->rx_desc_list_head;
}

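/*
 * Layout note: both chain-init routines above build a circular singly linked
 * descriptor list. Descriptor i points at descriptor i + 1 through next_desc,
 * the last descriptor points back at the head, and each descriptor's buffer1
 * addresses its own size-byte slice of one contiguous buffer area of
 * count * size bytes.
 */
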
void enet_timestamp_enable(ENET_Type *ptr, bool enable)
{
    /* enable or disable the timestamp */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSENA_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSENA_SET(enable);
}

void enet_set_subsecond_increment(ENET_Type *ptr, uint8_t ssinc)
{
    ptr->SUB_SEC_INCR &= ~ENET_SUB_SEC_INCR_SSINC_MASK;
    ptr->SUB_SEC_INCR |= ENET_SUB_SEC_INCR_SSINC_SET(ssinc);
}

void enet_set_ptp_timestamp(ENET_Type *ptr, enet_ptp_ts_update_t *timestamp)
{
    /* initialize the system time with the given timestamp */
    ptr->SYST_SEC_UPD = timestamp->sec;
    ptr->SYST_NSEC_UPD = timestamp->nsec;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSINIT_MASK;

    /* wait until the initialization completes */
    while (ENET_TS_CTRL_TSINIT_GET(ptr->TS_CTRL) == 1) {
    }
}

void enet_get_ptp_timestamp(ENET_Type *ptr, enet_ptp_ts_system_t *timestamp)
{
    timestamp->sec = ptr->SYST_SEC;
    timestamp->nsec = ptr->SYST_NSEC;
}

void enet_update_ptp_timeoffset(ENET_Type *ptr, enet_ptp_ts_update_t *timeoffset)
{
    /* write the offset (positive or negative) into the timestamp update high and low registers */
    ptr->SYST_SEC_UPD = ENET_SYST_SEC_UPD_TSS_SET(timeoffset->sec);
    ptr->SYST_NSEC_UPD = ENET_SYST_NSEC_UPD_ADDSUB_SET(timeoffset->sign) | ENET_SYST_NSEC_UPD_TSSS_SET(timeoffset->nsec);

    /* update the timestamp */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSUPDT_MASK;

    /* wait for the update to finish */
    while (ENET_TS_CTRL_TSUPDT_GET(ptr->TS_CTRL)) {
    }
}

void enet_adjust_ptp_time_freq(ENET_Type *ptr, int32_t adj)
{
    /* scale the base addend by the requested frequency adjustment */
    ptr->TS_ADDEND = (uint32_t)((int64_t)adj * ENET_ADJ_FREQ_BASE_ADDEND / (ENET_ONE_SEC_IN_NANOSEC - adj) + ENET_ADJ_FREQ_BASE_ADDEND);

    /* update the addend */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSADDREG_MASK;

    /* wait until the addend update completes */
    while (ENET_TS_CTRL_TSADDREG_GET(ptr->TS_CTRL)) {
    }
}

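/*
 * Note on the addend formula above (a derivation, assuming adj is a frequency
 * offset in parts per billion): in fine-update mode the hardware adds
 * TS_ADDEND to a 32-bit accumulator on every PTP clock, so the timestamp rate
 * scales linearly with the addend. Multiplying the base addend by
 * 10^9 / (10^9 - adj), written above as base + base * adj / (10^9 - adj) with
 * 64-bit intermediate math, speeds the clock up for adj > 0 and slows it down
 * for adj < 0.
 */
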
void enet_set_ptp_version(ENET_Type *ptr, enet_ptp_version_t ptp_ver)
{
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSVER2ENA_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSVER2ENA_SET(ptp_ver);
}

hpm_stat_t enet_enable_ptp_frame_type(ENET_Type *ptr, enet_ptp_frame_type_t ptp_frame_type, bool enable)
{
    hpm_stat_t stat = status_success;

    if (ptp_frame_type == enet_ptp_frame_ipv4) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPV4ENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPV4ENA_SET(enable);
    } else if (ptp_frame_type == enet_ptp_frame_ipv6) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPV6ENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPV6ENA_SET(enable);
    } else if (ptp_frame_type == enet_ptp_frame_ethernet) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPENA_SET(enable);
    } else {
        return status_invalid_argument;
    }

    return stat;
}

void enet_set_snapshot_ptp_message_type(ENET_Type *ptr, enet_ts_ss_ptp_msg_t ts_ss_ptp_msg)
{
    /* set the ptp message type for snapshots */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_SNAPTYPSEL_MASK;
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSMSTRENA_MASK;
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSEVNTENA_MASK;
    ptr->TS_CTRL |= ts_ss_ptp_msg << ENET_TS_CTRL_TSEVNTENA_SHIFT;
}

void enet_init_ptp(ENET_Type *ptr, enet_ptp_config_t *config)
{
    enet_mask_interrupt_event(ptr, ENET_INTR_STATUS_TSIS_MASK);

    /* select the rollover resolution of the nanosecond counter */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSCTRLSSR_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSCTRLSSR_SET(config->timestamp_rollover_mode);

    /* enable timestamping */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSENALL_MASK | ENET_TS_CTRL_TSENA_MASK;

    /* set the sub-second increment */
    ptr->SUB_SEC_INCR &= ~ENET_SUB_SEC_INCR_SSINC_MASK;
    ptr->SUB_SEC_INCR |= ENET_SUB_SEC_INCR_SSINC_SET(config->ssinc);

    if (config->update_method == enet_ptp_time_fine_update) {
        /* set the addend */
        ptr->TS_ADDEND = config->addend;

        /* update the addend */
        ptr->TS_CTRL |= ENET_TS_CTRL_TSADDREG_MASK;

        /* poll until the addend update completes */
        while (ENET_TS_CTRL_TSADDREG_GET(ptr->TS_CTRL)) {
        }

        /* fine update */
        ptr->TS_CTRL |= ENET_TS_CTRL_TSCFUPDT_MASK;
    } else {
        /* coarse update */
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSCFUPDT_MASK;
    }
}

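/*
 * Note (an assumption about fine-update mode, consistent with
 * enet_adjust_ptp_time_freq() above): the sub-second counter advances by
 * config->ssinc nanoseconds each time the 32-bit accumulator overflows, and
 * the accumulator gains config->addend once per PTP reference clock, so
 * addend = 2^32 * (10^9 / ssinc) / f_ptp_clk yields a nominal-rate clock.
 */
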
void enet_set_pps0_control_output(ENET_Type *ptr, enet_pps_ctrl_t freq)
{
    /* select the frequency-output function for PPS0 (PPSEN0 = 0) and program
     * the output frequency; clear the control field first so stale bits are
     * not OR-ed into the new value */
    ptr->PPS_CTRL &= ~(ENET_PPS_CTRL_PPSEN0_MASK | ENET_PPS_CTRL_PPSCTRLCMD0_MASK);
    ptr->PPS_CTRL |= ENET_PPS_CTRL_PPSCTRLCMD0_SET(freq);
}

hpm_stat_t enet_set_ppsx_command(ENET_Type *ptr, enet_pps_cmd_t cmd, enet_pps_idx_t idx)
{
    if (idx >= ENET_SOC_PPS_MAX_COUNT) {
        return status_invalid_argument;
    }

    /* Wait for the last command to complete */
    while (ptr->PPS_CTRL & (ENET_PPS_CMD_MASK << ((idx + 1) << ENET_PPS_CMD_OFS_FAC))) {
    }

    /* Set the specified pps output with the specified command */
    ptr->PPS_CTRL |= cmd << ((idx + 1) << ENET_PPS_CMD_OFS_FAC);

    return status_success;
}

hpm_stat_t enet_set_ppsx_config(ENET_Type *ptr, enet_pps_cmd_config_t *cmd_cfg, enet_pps_idx_t idx)
{
    if (idx >= ENET_SOC_PPS_MAX_COUNT) {
        return status_invalid_argument;
    }

    /* Set the interval and width for PPSx */
    if (idx == enet_pps_0) {
        ptr->PPS0_INTERVAL = cmd_cfg->pps_interval - 1;
        ptr->PPS0_WIDTH    = cmd_cfg->pps_width - 1;
    } else {
        ptr->PPS[idx].INTERVAL = cmd_cfg->pps_interval - 1;
        ptr->PPS[idx].WIDTH    = cmd_cfg->pps_width - 1;
    }

    /* Set the target timestamp */
    if (idx == enet_pps_0) {
        ptr->TGTTM_SEC  = cmd_cfg->target_sec;
        ptr->TGTTM_NSEC = cmd_cfg->target_nsec;
    } else {
        ptr->PPS[idx].TGTTM_SEC  = cmd_cfg->target_sec;
        ptr->PPS[idx].TGTTM_NSEC = cmd_cfg->target_nsec;
    }

    /* Enable the command function for PPS0 */
    if (idx == enet_pps_0) {
        ptr->PPS_CTRL |= ENET_PPS_CTRL_PPSEN0_MASK;
    }

#if ENET_SOC_PPS1_EN
    if (idx == enet_pps_1) {
        ptr->PPS_CTRL |= ENET_PPS_CTRL_PPSEN1_MASK;
    }
#endif

    /* Wait for the last command to complete */
    while (ptr->PPS_CTRL & (ENET_PPS_CMD_MASK << ((idx + 1) << ENET_PPS_CMD_OFS_FAC))) {
    }

    /* Initialize with the No Command value */
    ptr->PPS_CTRL &= ~(ENET_PPS_CMD_MASK << ((idx + 1) << ENET_PPS_CMD_OFS_FAC));

    return status_success;
}
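
/*
 * Usage sketch (illustrative): generate a pulse train on PPS0 starting at an
 * absolute PTP time. The target time, interval and width values are
 * application assumptions (interval and width count sub-second increments),
 * and the command enum value name is an assumption about hpm_enet_drv.h.
 *
 *     enet_pps_cmd_config_t pps_cfg = {
 *         .target_sec   = 1000,
 *         .target_nsec  = 0,
 *         .pps_interval = 50000000,
 *         .pps_width    = 25000000,
 *     };
 *     enet_set_ppsx_config(HPM_ENET0, &pps_cfg, enet_pps_0);
 *     enet_set_ppsx_command(HPM_ENET0, enet_pps_cmd_start_pulse_train, enet_pps_0);
 */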