/*
 * Copyright (c) 2021 HPMicro
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

/*---------------------------------------------------------------------
 * Includes
 *---------------------------------------------------------------------
 */
#include "hpm_enet_drv.h"
#include "hpm_enet_soc_drv.h"

/*---------------------------------------------------------------------
 * Internal API
 *---------------------------------------------------------------------
 */
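/* Configure the DMA operation mode, enable the requested DMA interrupts,
 * wait for the AXI bus to go idle, and start the transmit and receive
 * DMA engines.
 */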
static void enet_mode_init(ENET_Type *ptr, uint32_t intr)
{
    /* receive and transmit store and forward */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_RSF_MASK | ENET_DMA_OP_MODE_TSF_MASK;

    /* enable hardware flow control */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_EFC_MASK;

    /* enable error frame and undersized good frame forwarding */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_FEF_MASK;

    /* disable osf mode */
    ptr->DMA_OP_MODE &= ~ENET_DMA_OP_MODE_OSF_MASK;

    ptr->DMA_INTR_EN |= intr;

    /* wait until the AXI read and write channels are idle */
    while (ENET_DMA_BUS_STATUS_AXIRDSTS_GET(ptr->DMA_BUS_STATUS) || ENET_DMA_BUS_STATUS_AXWHSTS_GET(ptr->DMA_BUS_STATUS)) {
    }

    /* start the receive and transmit dma */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_ST_MASK | ENET_DMA_OP_MODE_SR_MASK;
}

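/* Reset the DMA, program the bus mode and AXI burst settings, set up the
 * Tx/Rx descriptor rings in chain mode, and start the DMA engines.
 */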
static int enet_dma_init(ENET_Type *ptr, enet_desc_t *desc, uint32_t intr)
{
    /* generate software reset */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_SWR_MASK;

    /* wait for the completion of the reset process */
    while (ENET_DMA_BUS_MODE_SWR_GET(ptr->DMA_BUS_MODE)) {
    }

    /* initialize bus mode register */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_AAL_MASK;

    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_FB_MASK;

    /* enable pblx8 mode */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_PBLX8_MASK;

    /* set programmable burst length */
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_PBL_SET(enet_pbl_32);

    /* disable separate pbl */
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_USP_MASK;

    /* descriptor length */
#if ENET_SOC_ALT_EHD_DES_LEN == ENET_SOC_ALT_EHD_DES_MIN_LEN
    ptr->DMA_BUS_MODE &= ~ENET_DMA_BUS_MODE_ATDS_MASK;
#elif ENET_SOC_ALT_EHD_DES_LEN == ENET_SOC_ALT_EHD_DES_MAX_LEN
    ptr->DMA_BUS_MODE |= ENET_DMA_BUS_MODE_ATDS_MASK;
#endif

    /* set the maximum enabled burst length */
    if (ENET_DMA_BUS_MODE_FB_GET(ptr->DMA_BUS_MODE) == 0) {
        ptr->DMA_AXI_MODE |= ENET_DMA_AXI_MODE_BLEN4_MASK | ENET_DMA_AXI_MODE_BLEN8_MASK | ENET_DMA_AXI_MODE_BLEN16_MASK;
    } else {
        /* TODO: set BLENX_MASK */
    }

    /* initialize Tx descriptor list: chain mode */
    enet_dma_tx_desc_chain_init(ptr, desc);

    /* initialize Rx descriptor list: chain mode */
    enet_dma_rx_desc_chain_init(ptr, desc);

    enet_mode_init(ptr, intr);

    enet_dma_flush(ptr);

    return true;
}

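/* Program the MAC address registers and the frame filter, select the port
 * parameters for the given interface type, and enable the transmitter and
 * receiver.
 */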
static int enet_mac_init(ENET_Type *ptr, enet_mac_config_t *config, enet_inf_type_t inf_type)
{
    for (int i = 0; i < config->valid_max_count; i++) {
        if (i == 0) {
            ptr->MAC_ADDR_0_HIGH = ENET_MAC_ADDR_0_HIGH_AE_MASK;
            ptr->MAC_ADDR_0_HIGH |= ENET_MAC_ADDR_0_HIGH_ADDRHI_SET(config->mac_addr_high[i]);
            ptr->MAC_ADDR_0_LOW = ENET_MAC_ADDR_0_LOW_ADDRLO_SET(config->mac_addr_low[i]);
        } else {
            ptr->MAC_ADDR[i].HIGH |= ENET_MAC_ADDR_HIGH_ADDRHI_SET(config->mac_addr_high[i]);
            ptr->MAC_ADDR[i].LOW |= ENET_MAC_ADDR_LOW_ADDRLO_SET(config->mac_addr_low[i]);
        }
    }

    /* set the appropriate filters for the incoming frames */
    ptr->MACFF |= ENET_MACFF_RA_SET(1); /* receive all */

    /* replace the content of the mac address 0 in the sa field of all transmitted frames */
    ptr->MACCFG &= ~ENET_MACCFG_SARC_MASK;
    ptr->MACCFG |= ENET_MACCFG_SARC_SET(0x3);

    /* set the port select and speed bits; they are corrected per interface type below */
    ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;

    if (inf_type == enet_inf_rgmii) {
        ptr->MACCFG &= ~ENET_MACCFG_PS_MASK;
    } else if (inf_type == enet_inf_rmii) {
        ptr->MACCFG |= ENET_MACCFG_PS_MASK | ENET_MACCFG_FES_MASK;
    } else {
        return status_invalid_argument;
    }

    /* enable full duplex */
    ptr->MACCFG |= ENET_MACCFG_DM_MASK;

    /* set the inter-frame gap according to the duplex mode */
    if (ENET_MACCFG_DM_GET(ptr->MACCFG) == 0) {
        ptr->MACCFG |= ENET_MACCFG_IFG_SET(4);
    } else {
        ptr->MACCFG |= ENET_MACCFG_IFG_SET(2);
    }

    /* enable the transmitter and receiver */
    ptr->MACCFG |= ENET_MACCFG_TE_MASK | ENET_MACCFG_RE_MASK;

    return true;
}

/*---------------------------------------------------------------------
 * Driver API
 *---------------------------------------------------------------------
 */
void enet_dma_flush(ENET_Type *ptr)
{
    /* flush the DMA transmit FIFO and wait until flushing completes */
    ptr->DMA_OP_MODE |= ENET_DMA_OP_MODE_FTF_MASK;
    while (ENET_DMA_OP_MODE_FTF_GET(ptr->DMA_OP_MODE)) {
    }
}

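/* Write a PHY register over the MDIO interface. Note that the MDC clock
 * divider is hard-coded and assumes a CSR clock in the 150 MHz to 250 MHz
 * range (enet_csr_150m_to_250m_mdc_csr_div_102).
 */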
void enet_write_phy(ENET_Type *ptr, uint32_t phy_addr, uint32_t addr, uint32_t data)
{
    /* set the data to be written */
    ptr->GMII_DATA = ENET_GMII_DATA_GD_SET(data);

    /* set the phy address, register address, write operation and busy flag */
    ptr->GMII_ADDR = ENET_GMII_ADDR_PA_SET(phy_addr)
                   | ENET_GMII_ADDR_GR_SET(addr)
                   | ENET_GMII_ADDR_CR_SET(enet_csr_150m_to_250m_mdc_csr_div_102)
                   | ENET_GMII_ADDR_GW_SET(enet_phy_op_write)
                   | ENET_GMII_ADDR_GB_SET(enet_phy_busy);

    /* wait until the write operation is completed */
    while (ENET_GMII_ADDR_GB_GET(ptr->GMII_ADDR)) {
    }
}

uint16_t enet_read_phy(ENET_Type *ptr, uint32_t phy_addr, uint32_t addr)
{
    /* set the phy address, register address, read operation and busy flag */
    ptr->GMII_ADDR = ENET_GMII_ADDR_PA_SET(phy_addr)
                   | ENET_GMII_ADDR_GR_SET(addr)
                   | ENET_GMII_ADDR_CR_SET(enet_csr_150m_to_250m_mdc_csr_div_102)
                   | ENET_GMII_ADDR_GW_SET(enet_phy_op_read)
                   | ENET_GMII_ADDR_GB_SET(enet_phy_busy);

    /* wait until the read operation is completed */
    while (ENET_GMII_ADDR_GB_GET(ptr->GMII_ADDR)) {
    }

    /* read and return the data */
    return (uint16_t)ENET_GMII_DATA_GD_GET(ptr->GMII_DATA);
}

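/* A minimal MDIO usage sketch for the two accessors above. The PHY address
 * (0) is illustrative and depends on the board's PHY strapping; BMCR at
 * register offset 0 and its soft-reset bit 15 follow the IEEE 802.3
 * clause 22 register layout.
 *
 *     uint16_t bmcr = enet_read_phy(HPM_ENET0, 0, 0);     // read BMCR
 *     enet_write_phy(HPM_ENET0, 0, 0, bmcr | (1u << 15)); // request a PHY soft reset
 */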
int enet_controller_init(ENET_Type *ptr, enet_inf_type_t inf_type, enet_desc_t *desc, enet_mac_config_t *config, uint32_t intr)
{
    /* select an interface */
    enet_intf_selection(ptr, inf_type);

    /* initialize the DMA */
    enet_dma_init(ptr, desc, intr);

    /* initialize the MAC */
    enet_mac_init(ptr, config, inf_type);

    return true;
}

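/* A minimal bring-up sketch for enet_controller_init(). The buffer counts
 * and sizes, the HPM_ENET0 instance, and the ENET_DMA_INTR_EN_NIE_MASK
 * interrupt mask are illustrative assumptions; descriptors and buffers must
 * be placed in memory the ENET DMA can access (typically non-cacheable).
 *
 *     static enet_tx_desc_t tx_desc[4];
 *     static enet_rx_desc_t rx_desc[4];
 *     static uint8_t tx_buff[4][1536];
 *     static uint8_t rx_buff[4][1536];
 *
 *     enet_desc_t desc = {0};
 *     enet_mac_config_t cfg = {0};
 *
 *     desc.tx_desc_list_head = tx_desc;
 *     desc.rx_desc_list_head = rx_desc;
 *     desc.tx_buff_cfg.buffer = (uint32_t)tx_buff;
 *     desc.tx_buff_cfg.count  = 4;
 *     desc.tx_buff_cfg.size   = 1536;
 *     desc.rx_buff_cfg.buffer = (uint32_t)rx_buff;
 *     desc.rx_buff_cfg.count  = 4;
 *     desc.rx_buff_cfg.size   = 1536;
 *
 *     cfg.valid_max_count  = 1;
 *     cfg.mac_addr_high[0] = 0x0000EEFFul;   // illustrative MAC address
 *     cfg.mac_addr_low[0]  = 0xAABBCCDDul;
 *
 *     enet_controller_init(HPM_ENET0, enet_inf_rmii, &desc, &cfg, ENET_DMA_INTR_EN_NIE_MASK);
 */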
/*---------------------------------------------------------------------
 * DMA API
 *---------------------------------------------------------------------
 */
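/* Walk the Rx descriptor ring from the current position and track frame
 * segmentation state in rx_frame_info. Returns 1 once a descriptor with the
 * last-segment flag is owned by the CPU, i.e. a complete frame is available;
 * returns 0 otherwise.
 */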
uint32_t enet_check_received_frame(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info)
{
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
        (rx_desc_list_cur->rdes0_bm.ls == 1)) {
        /* the last segment: the frame is complete */
        rx_frame_info->seg_count++;
        if (rx_frame_info->seg_count == 1) {
            rx_frame_info->fs_rx_desc = rx_desc_list_cur;
        }
        rx_frame_info->ls_rx_desc = rx_desc_list_cur;
        return 1;
    } else if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
               (rx_desc_list_cur->rdes0_bm.fs == 1) &&
               (rx_desc_list_cur->rdes0_bm.ls == 0)) {
        /* the first segment */
        rx_frame_info->fs_rx_desc = rx_desc_list_cur;
        rx_frame_info->ls_rx_desc = NULL;
        rx_frame_info->seg_count = 1;
        rx_desc_list_cur = (enet_rx_desc_t *)rx_desc_list_cur->rdes3_bm.next_desc;
        *parent_rx_desc_list_cur = rx_desc_list_cur;
    } else if ((rx_desc_list_cur->rdes0_bm.own == 0) &&
               (rx_desc_list_cur->rdes0_bm.fs == 0) &&
               (rx_desc_list_cur->rdes0_bm.ls == 0)) {
        /* an intermediate segment */
        rx_frame_info->seg_count++;
        rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
        *parent_rx_desc_list_cur = rx_desc_list_cur;
    }

    return 0;
}

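/* Assemble an enet_frame_t for a frame previously detected by
 * enet_check_received_frame(): the length (without CRC), the buffer start
 * address and the first descriptor. Advances the caller's current Rx
 * descriptor pointer to the next descriptor in the chain.
 */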
enet_frame_t enet_get_received_frame(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info)
{
    uint32_t frame_length = 0;
    enet_frame_t frame = {0, 0, 0};
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* get the frame length of the received packet: subtract the 4 bytes of CRC */
    frame_length = rx_desc_list_cur->rdes0_bm.fl - 4;
    frame.length = frame_length;

    /* get the address of the first frame descriptor and the buffer start address */
    frame.rx_desc = rx_frame_info->fs_rx_desc;
    frame.buffer = rx_frame_info->fs_rx_desc->rdes2_bm.buffer1;

    /* chained mode: advance the global Rx descriptor pointer to the next descriptor in the list */
    rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
    *parent_rx_desc_list_cur = rx_desc_list_cur;

    return frame;
}

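/* Interrupt-driven variant: scan up to rx_desc_count CPU-owned descriptors,
 * accumulate segmentation state, and return the frame once its last segment
 * is found. A zero-initialized frame is returned when no complete frame is
 * available.
 */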
enet_frame_t enet_get_received_frame_interrupt(enet_rx_desc_t **parent_rx_desc_list_cur, enet_rx_frame_info_t *rx_frame_info, uint32_t rx_desc_count)
{
    enet_frame_t frame = {0, 0, 0};
    uint32_t desc_scan_counter = 0;
    enet_rx_desc_t *rx_desc_list_cur = *parent_rx_desc_list_cur;

    /* scan descriptors owned by the CPU */
    while ((rx_desc_list_cur->rdes0_bm.own == 0) &&
           (desc_scan_counter < rx_desc_count)) {

        desc_scan_counter++;

        if ((rx_desc_list_cur->rdes0_bm.fs == 1) &&
            (rx_desc_list_cur->rdes0_bm.ls == 0)) {
            /* the first segment in the frame */
            rx_frame_info->fs_rx_desc = rx_desc_list_cur;
            rx_frame_info->seg_count = 1;
            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;
        } else if ((rx_desc_list_cur->rdes0_bm.ls == 0) &&
                   (rx_desc_list_cur->rdes0_bm.fs == 0)) {
            /* an intermediate segment */
            rx_frame_info->seg_count++;
            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;
        } else {
            /* the last segment */
            rx_frame_info->ls_rx_desc = rx_desc_list_cur;

            rx_frame_info->seg_count++;

            /* the first segment is also the last segment */
            if (rx_frame_info->seg_count == 1) {
                rx_frame_info->fs_rx_desc = rx_desc_list_cur;
            }

            /* get the frame length of the received packet: subtract the 4 bytes of CRC */
            frame.length = rx_desc_list_cur->rdes0_bm.fl - 4;

            /* get the buffer start address; check if there is more than one segment in the frame */
            if (rx_frame_info->seg_count > 1) {
                frame.buffer = rx_frame_info->fs_rx_desc->rdes2_bm.buffer1;
            } else {
                frame.buffer = rx_desc_list_cur->rdes2_bm.buffer1;
            }

            frame.rx_desc = rx_frame_info->fs_rx_desc;

            rx_desc_list_cur = (enet_rx_desc_t *)(rx_desc_list_cur->rdes3_bm.next_desc);
            *parent_rx_desc_list_cur = rx_desc_list_cur;

            return frame;
        }
    }

    return frame;
}

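/* Split a frame of frame_length bytes across one or more Tx descriptors of
 * tx_buff_size bytes each and hand them to the DMA. For example, a 3000-byte
 * frame with 1536-byte buffers occupies two descriptors: 3000 / 1536 = 1
 * with a remainder, so buf_count = 2 and the last descriptor carries
 * 3000 - 1536 = 1464 bytes.
 */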
uint32_t enet_prepare_transmission_descriptors(ENET_Type *ptr, enet_tx_desc_t **parent_tx_desc_list_cur, uint16_t frame_length, uint16_t tx_buff_size)
{
    uint32_t buf_count = 0, size = 0, i = 0;
    enet_tx_desc_t *dma_tx_desc;
    enet_tx_desc_t *tx_desc_list_cur = *parent_tx_desc_list_cur;

    if (tx_buff_size == 0) {
        return ENET_ERROR;
    }

    /* the own bit marks whether a descriptor belongs to the Ethernet DMA (set) or the CPU (cleared) */
    dma_tx_desc = tx_desc_list_cur;

    /* calculate how many buffers the frame occupies */
    if (frame_length > tx_buff_size) {
        buf_count = frame_length / tx_buff_size;
        if (frame_length % tx_buff_size) {
            buf_count++;
        }
    } else {
        buf_count = 1;
    }

    if (buf_count == 1) {
        /* set both the first and the last segment bits */
        dma_tx_desc->tdes0_bm.own = 0;
        dma_tx_desc->tdes0_bm.ic = 0;
        dma_tx_desc->tdes0_bm.fs = 1;
        dma_tx_desc->tdes0_bm.ls = 1;
        dma_tx_desc->tdes0_bm.dc = 1;
        dma_tx_desc->tdes0_bm.dp = 0;
        dma_tx_desc->tdes0_bm.crcr = 1;
        dma_tx_desc->tdes0_bm.cic = 3;
        dma_tx_desc->tdes1_bm.saic = 2;

        /* set the frame size */
        dma_tx_desc->tdes1_bm.tbs1 = (frame_length & ENET_DMATxDesc_TBS1);

        /* set the own bit of the Tx descriptor status: gives the buffer back to the Ethernet DMA */
        dma_tx_desc->tdes0_bm.own = 1;
        ptr->DMA_TX_POLL_DEMAND = 1;

        dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
    } else {
        enet_tx_desc_t *dma_tx_first_desc = dma_tx_desc;

        for (i = 0; i < buf_count; i++) {
            /* clear the first and last segment bits */
            dma_tx_desc->tdes0_bm.fs = 0;
            dma_tx_desc->tdes0_bm.ls = 0;

            if (i == 0) {
                /* set the first segment bit */
                dma_tx_desc->tdes0_bm.fs = 1;
            }

            /* set the buffer 1 size */
            dma_tx_desc->tdes1_bm.tbs1 = (tx_buff_size & ENET_DMATxDesc_TBS1);

            if (i == (buf_count - 1)) {
                /* set the last segment bit and the size of the final partial buffer */
                dma_tx_desc->tdes0_bm.ls = 1;
                size = frame_length - (buf_count - 1) * tx_buff_size;
                dma_tx_desc->tdes1_bm.tbs1 = (size & ENET_DMATxDesc_TBS1);
            }

            /* give every descriptor except the first one back to the DMA; the
             * first descriptor's own bit is set last so that the DMA cannot
             * start fetching a partially prepared chain
             */
            if (i != 0) {
                dma_tx_desc->tdes0_bm.own = 1;
            }

            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
        }

        /* hand the first descriptor to the DMA and resume transmission */
        dma_tx_first_desc->tdes0_bm.own = 1;
        ptr->DMA_TX_POLL_DEMAND = 1;
    }

    tx_desc_list_cur = dma_tx_desc;
    *parent_tx_desc_list_cur = tx_desc_list_cur;

    return ENET_SUCCESS;
}

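/* Build the Tx descriptor ring in chain mode: each descriptor points at its
 * slice of the Tx buffer pool and at the next descriptor, with the last
 * descriptor wrapping back to the head.
 */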
void enet_dma_tx_desc_chain_init(ENET_Type *ptr, enet_desc_t *desc)
{
    uint32_t i = 0;
    enet_tx_desc_t *dma_tx_desc;

    /* set the tx_desc_list_cur pointer to the first descriptor of the dma_tx_desc_tab list */
    desc->tx_desc_list_cur = desc->tx_desc_list_head;

    /* fill each dma_tx_desc descriptor with the right values */
    for (i = 0; i < desc->tx_buff_cfg.count; i++) {
        /* get the pointer to the ith member of the Tx descriptor list */
        dma_tx_desc = desc->tx_desc_list_head + i;

        /* set the second address chained bit */
        dma_tx_desc->tdes0_bm.tch = 1;

        /* set the buffer 1 address pointer */
        dma_tx_desc->tdes2_bm.buffer1 = (uint32_t)(&((uint8_t *)desc->tx_buff_cfg.buffer)[i * desc->tx_buff_cfg.size]);

        /* link all Tx descriptors */
        if (i < desc->tx_buff_cfg.count - 1) {
            /* set the next descriptor address register to the next descriptor base address */
            dma_tx_desc->tdes3_bm.next_desc = (uint32_t)(desc->tx_desc_list_head + i + 1);
        } else {
            /* for the last descriptor, set the next descriptor address register to the first descriptor base address */
            dma_tx_desc->tdes3_bm.next_desc = (uint32_t)desc->tx_desc_list_head;
        }
    }

    /* set the transmit descriptor list address register */
    ptr->DMA_TX_DESC_LIST_ADDR = (uint32_t)desc->tx_desc_list_head;
}

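/* Build the Rx descriptor ring in chain mode, mirroring the Tx setup; every
 * descriptor starts out owned by the DMA so reception can begin immediately.
 */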
void enet_dma_rx_desc_chain_init(ENET_Type *ptr, enet_desc_t *desc)
{
    uint32_t i = 0;
    enet_rx_desc_t *dma_rx_desc;

    /* set the rx_desc_list_cur pointer to the first descriptor of the dma_rx_desc_tab list */
    desc->rx_desc_list_cur = desc->rx_desc_list_head;

    /* fill each dma_rx_desc descriptor with the right values */
    for (i = 0; i < desc->rx_buff_cfg.count; i++) {
        /* get the pointer to the ith member of the Rx descriptor list */
        dma_rx_desc = desc->rx_desc_list_head + i;

        /* set the own bit of the Rx descriptor status */
        dma_rx_desc->rdes0_bm.own = 1;

        /* set the buffer 1 size and the second address chained bit */
        dma_rx_desc->rdes1_bm.rch = 1;
        dma_rx_desc->rdes1_bm.rbs1 = desc->rx_buff_cfg.size;

        /* set the buffer 1 address pointer */
        dma_rx_desc->rdes2_bm.buffer1 = (uint32_t)(&((uint8_t *)desc->rx_buff_cfg.buffer)[i * desc->rx_buff_cfg.size]);

        /* link all Rx descriptors */
        if (i < desc->rx_buff_cfg.count - 1) {
            /* set the next descriptor address register to the next descriptor base address */
            dma_rx_desc->rdes3_bm.next_desc = (uint32_t)(desc->rx_desc_list_head + i + 1);
        } else {
            /* for the last descriptor, set the next descriptor address register to the first descriptor base address */
            dma_rx_desc->rdes3_bm.next_desc = (uint32_t)desc->rx_desc_list_head;
        }
    }

    /* set the receive descriptor list address register */
    ptr->DMA_RX_DESC_LIST_ADDR = (uint32_t)desc->rx_desc_list_head;
}

void enet_mask_interrupt_event(ENET_Type *ptr, uint32_t mask)
{
    /* mask the specified interrupts */
    ptr->INTR_MASK |= mask;
}

void enet_timestamp_enable(ENET_Type *ptr, bool enable)
{
    /* enable or disable timestamping */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSENA_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSENA_SET(enable);
}

void enet_set_subsecond_increment(ENET_Type *ptr, uint8_t ssinc)
{
    ptr->SUB_SEC_INCR &= ~ENET_SUB_SEC_INCR_SSINC_MASK;
    ptr->SUB_SEC_INCR |= ENET_SUB_SEC_INCR_SSINC_SET(ssinc);
}

void enet_set_ptp_timestamp(ENET_Type *ptr, enet_ptp_time_t *timestamp)
{
    ptr->SYST_SEC_UPD = timestamp->sec;
    ptr->SYST_NSEC_UPD = timestamp->nsec;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSINIT_MASK;

    /* wait until the system time initialization completes */
    while (ENET_TS_CTRL_TSINIT_GET(ptr->TS_CTRL) == 1) {
    }
}

void enet_get_ptp_timestamp(ENET_Type *ptr, enet_ptp_time_t *timestamp)
{
    timestamp->sec = ptr->SYST_SEC;
    timestamp->nsec = ptr->SYST_NSEC;
}

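/* Apply a signed offset to the PTP system time via the timestamp update
 * registers; the sign field selects addition or subtraction.
 */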
void enet_update_ptp_timeoffset(ENET_Type *ptr, enet_ptp_time_t *timeoffset)
{
    /* write the offset (positive or negative) into the timestamp update high and low registers */
    ptr->SYST_SEC_UPD = ENET_SYST_SEC_UPD_TSS_SET(timeoffset->sec);
    ptr->SYST_NSEC_UPD = ENET_SYST_NSEC_UPD_ADDSUB_SET(timeoffset->sign) | ENET_SYST_NSEC_UPD_TSSS_SET(timeoffset->nsec);

    /* update the timestamp */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSUPDT_MASK;

    /* wait for the update to finish */
    while (ENET_TS_CTRL_TSUPDT_GET(ptr->TS_CTRL)) {
    }
}

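/* Trim the PTP clock frequency by adj, expressed in parts per billion. The
 * new addend is base_addend * 10^9 / (10^9 - adj), i.e. the accumulator rate
 * is scaled by roughly (1 + adj / 10^9); the code below computes the
 * algebraically equivalent form
 * base_addend + adj * base_addend / (10^9 - adj).
 */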
void enet_adjust_ptp_time_freq(ENET_Type *ptr, int32_t adj)
{
    /* calculate and set the new addend */
    ptr->TS_ADDEND = (uint32_t)((int64_t)adj * ENET_ADJ_FREQ_BASE_ADDEND / (ENET_ONE_SEC_IN_NANOSEC - adj) + ENET_ADJ_FREQ_BASE_ADDEND);

    /* latch the new addend into the timestamp accumulator */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSADDREG_MASK;

    /* wait until the addend update completes */
    while (ENET_TS_CTRL_TSADDREG_GET(ptr->TS_CTRL)) {
    }
}

void enet_set_ptp_version(ENET_Type *ptr, enet_ptp_version_t ptp_ver)
{
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSVER2ENA_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSVER2ENA_SET(ptp_ver);
}

hpm_stat_t enet_enable_ptp_frame_type(ENET_Type *ptr, enet_ptp_frame_type_t ptp_frame_type, bool enable)
{
    hpm_stat_t stat = status_success;

    if (ptp_frame_type == enet_ptp_frame_ipv4) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPV4ENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPV4ENA_SET(enable);
    } else if (ptp_frame_type == enet_ptp_frame_ipv6) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPV6ENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPV6ENA_SET(enable);
    } else if (ptp_frame_type == enet_ptp_frame_ethernet) {
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSIPENA_MASK;
        ptr->TS_CTRL |= ENET_TS_CTRL_TSIPENA_SET(enable);
    } else {
        return status_invalid_argument;
    }

    return stat;
}

void enet_set_snapshot_ptp_message_type(ENET_Type *ptr, enet_ts_ss_ptp_msg_t ts_ss_ptp_msg)
{
    /* set the ptp message types for snapshots; the enum value packs the
     * SNAPTYPSEL, TSMSTRENA and TSEVNTENA fields, which occupy adjacent bits
     */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_SNAPTYPSEL_MASK;
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSMSTRENA_MASK;
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSEVNTENA_MASK;
    ptr->TS_CTRL |= ts_ss_ptp_msg << ENET_TS_CTRL_TSEVNTENA_SHIFT;
}

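/* Initialize PTP timestamping: mask the timestamp interrupt, select the
 * nanosecond counter resolution, enable timestamping for all frames, program
 * the sub-second increment, and select the fine or coarse update method.
 */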
void enet_init_ptp(ENET_Type *ptr, enet_ptp_config_t *config)
{
    /* mask the timestamp interrupt */
    enet_mask_interrupt_event(ptr, ENET_INTR_STATUS_TSIS_MASK);

    /* select the resolution of the nanosecond counter */
    ptr->TS_CTRL &= ~ENET_TS_CTRL_TSCTRLSSR_MASK;
    ptr->TS_CTRL |= ENET_TS_CTRL_TSCTRLSSR_SET(config->sub_sec_count_res);

    /* enable timestamping */
    ptr->TS_CTRL |= ENET_TS_CTRL_TSENALL_MASK | ENET_TS_CTRL_TSENA_MASK;

    /* set the sub-second increment */
    ptr->SUB_SEC_INCR &= ~ENET_SUB_SEC_INCR_SSINC_MASK;
    ptr->SUB_SEC_INCR |= ENET_SUB_SEC_INCR_SSINC_SET(config->ssinc);

    if (config->update_method == enet_ptp_time_fine_update) {
        /* set the addend */
        ptr->TS_ADDEND = config->addend;

        /* update the addend */
        ptr->TS_CTRL |= ENET_TS_CTRL_TSADDREG_MASK;

        /* poll the status of the addend update */
        while (ENET_TS_CTRL_TSADDREG_GET(ptr->TS_CTRL)) {
        }

        /* select the fine update method */
        ptr->TS_CTRL |= ENET_TS_CTRL_TSCFUPDT_MASK;
    } else {
        /* select the coarse update method */
        ptr->TS_CTRL &= ~ENET_TS_CTRL_TSCFUPDT_MASK;
    }
}