1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include <linux/delay.h>
5 #include "ice_common.h"
6 #include "ice_ptp_hw.h"
7 #include "ice_ptp_consts.h"
8 #include "ice_cgu_regs.h"
9
10 /* Low level functions for interacting with and managing the device clock used
11 * for the Precision Time Protocol.
12 *
13 * The ice hardware represents the current time using three registers:
14 *
15 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
16 * +---------------+ +---------------+ +---------------+
17 * | 32 bits | | 32 bits | | 32 bits |
18 * +---------------+ +---------------+ +---------------+
19 *
20 * The registers are incremented every clock tick using a 40bit increment
21 * value defined over two registers:
22 *
23 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
24 * +---------------+ +---------------+
25  *  |    8 bits     |   |    32 bits    |
26 * +---------------+ +---------------+
27 *
28  * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
29 * registers every clock source tick. Depending on the specific device
30 * configuration, the clock source frequency could be one of a number of
31 * values.
32 *
33 * For E810 devices, the increment frequency is 812.5 MHz
34 *
35 * For E822 devices the clock can be derived from different sources, and the
36 * increment has an effective frequency of one of the following:
37 * - 823.4375 MHz
38 * - 783.36 MHz
39 * - 796.875 MHz
40 * - 816 MHz
41 * - 830.078125 MHz
42 * - 783.36 MHz
43 *
44 * The hardware captures timestamps in the PHY for incoming packets, and for
45 * outgoing packets on request. To support this, the PHY maintains a timer
46 * that matches the lower 64 bits of the global source timer.
47 *
48 * In order to ensure that the PHY timers and the source timer are equivalent,
49 * shadow registers are used to prepare the desired initial values. A special
50 * sync command is issued to trigger copying from the shadow registers into
51 * the appropriate source and PHY registers simultaneously.
52 *
53 * The driver supports devices which have different PHYs with subtly different
54 * mechanisms to program and control the timers. We divide the devices into
55 * families named after the first major device, E810 and similar devices, and
56 * E822 and similar devices.
57 *
58 * - E822 based devices have additional support for fine grained Vernier
59 * calibration which requires significant setup
60 * - The layout of timestamp data in the PHY register blocks is different
61 * - The way timer synchronization commands are issued is different.
62 *
63 * To support this, very low level functions have an e810 or e822 suffix
64 * indicating what type of device they work on. Higher level abstractions for
65 * tasks that can be done on both devices do not have the suffix and will
66 * correctly look up the appropriate low level function when running.
67 *
68 * Functions which only make sense on a single device family may not have
69  * a suitable generic implementation.
70 */
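
/* Worked example (illustrative, not taken from this file's tables): the
 * increment value is a fixed-point quantity with 32 fractional bits, i.e.
 * nanoseconds per tick scaled by 2^32. The nominal E810 increment value
 * ICE_PTP_NOMINAL_INCVAL_E810 (0x13b13b13b, defined in ice_ptp_hw.h) works
 * out to 0x13b13b13b / 2^32 ~= 1.230769 ns per tick, which matches the
 * 812.5 MHz tick rate (1 / 812.5 MHz = 16/13 ns), so the timer advances by
 * one nanosecond of clock time per nanosecond of real time.
 */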
71
72 /**
73 * ice_get_ptp_src_clock_index - determine source clock index
74 * @hw: pointer to HW struct
75 *
76 * Determine the source clock index currently in use, based on device
77 * capabilities reported during initialization.
78 */
79 u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
80 {
81 return hw->func_caps.ts_func_info.tmr_index_assoc;
82 }
83
84 /**
85 * ice_ptp_read_src_incval - Read source timer increment value
86 * @hw: pointer to HW struct
87 *
88 * Read the increment value of the source timer and return it.
89 */
90 static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
91 {
92 u32 lo, hi;
93 u8 tmr_idx;
94
95 tmr_idx = ice_get_ptp_src_clock_index(hw);
96
97 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
98 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
99
100 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
101 }
102
103 /**
104 * ice_ptp_src_cmd - Prepare source timer for a timer command
105 * @hw: pointer to HW structure
106 * @cmd: Timer command
107 *
108 * Prepare the source timer for an upcoming timer sync command.
109 */
110 static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
111 {
112 u32 cmd_val;
113 u8 tmr_idx;
114
115 tmr_idx = ice_get_ptp_src_clock_index(hw);
116 cmd_val = tmr_idx << SEL_CPK_SRC;
117
118 switch (cmd) {
119 case INIT_TIME:
120 cmd_val |= GLTSYN_CMD_INIT_TIME;
121 break;
122 case INIT_INCVAL:
123 cmd_val |= GLTSYN_CMD_INIT_INCVAL;
124 break;
125 case ADJ_TIME:
126 cmd_val |= GLTSYN_CMD_ADJ_TIME;
127 break;
128 case ADJ_TIME_AT_TIME:
129 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
130 break;
131 case READ_TIME:
132 cmd_val |= GLTSYN_CMD_READ_TIME;
133 break;
134 case ICE_PTP_NOP:
135 break;
136 }
137
138 wr32(hw, GLTSYN_CMD, cmd_val);
139 }
140
141 /**
142 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
143 * @hw: pointer to HW struct
144 *
145 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
146 * write immediately. This triggers the hardware to begin executing all of the
147 * source and PHY timer commands synchronously.
148 */
149 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
150 {
151 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
152 ice_flush(hw);
153 }
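
/* Illustrative sketch only (the actual call sites are higher level helpers
 * outside this excerpt): the prepare/execute split described in the file
 * header is used roughly as follows when programming a new increment value,
 * error handling omitted:
 *
 *	ice_ptp_prep_phy_incval_e822(hw, incval);  // stage PHY shadow regs
 *	ice_ptp_src_cmd(hw, INIT_INCVAL);          // stage source timer cmd
 *	ice_ptp_port_cmd_e822(hw, INIT_INCVAL);    // stage each port's cmd
 *	ice_ptp_exec_tmr_cmd(hw);                  // latch everything at once
 *
 * Because nothing takes effect until ice_ptp_exec_tmr_cmd(), the source
 * timer and every PHY timer pick up the new value on the same edge.
 */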
154
155 /* E822 family functions
156 *
157 * The following functions operate on the E822 family of devices.
158 */
159
160 /**
161 * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
162 * @msg: the PHY message buffer to fill in
163 * @port: the port to access
164 * @offset: the register offset
165 */
166 static void
167 ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
168 {
169 int phy_port, phy, quadtype;
170
171 phy_port = port % ICE_PORTS_PER_PHY;
172 phy = port / ICE_PORTS_PER_PHY;
173 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
174
175 if (quadtype == 0) {
176 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
177 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
178 } else {
179 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
180 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
181 }
182
183 if (phy == 0)
184 msg->dest_dev = rmn_0;
185 else if (phy == 1)
186 msg->dest_dev = rmn_1;
187 else
188 msg->dest_dev = rmn_2;
189 }
190
191 /**
192 * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
193 * @low_addr: the low address to check
194 * @high_addr: on return, contains the high address of the 64bit register
195 *
196 * Checks if the provided low address is one of the known 64bit PHY values
197 * represented as two 32bit registers. If it is, return the appropriate high
198 * register offset to use.
199 */
200 static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
201 {
202 switch (low_addr) {
203 case P_REG_PAR_PCS_TX_OFFSET_L:
204 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
205 return true;
206 case P_REG_PAR_PCS_RX_OFFSET_L:
207 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
208 return true;
209 case P_REG_PAR_TX_TIME_L:
210 *high_addr = P_REG_PAR_TX_TIME_U;
211 return true;
212 case P_REG_PAR_RX_TIME_L:
213 *high_addr = P_REG_PAR_RX_TIME_U;
214 return true;
215 case P_REG_TOTAL_TX_OFFSET_L:
216 *high_addr = P_REG_TOTAL_TX_OFFSET_U;
217 return true;
218 case P_REG_TOTAL_RX_OFFSET_L:
219 *high_addr = P_REG_TOTAL_RX_OFFSET_U;
220 return true;
221 case P_REG_UIX66_10G_40G_L:
222 *high_addr = P_REG_UIX66_10G_40G_U;
223 return true;
224 case P_REG_UIX66_25G_100G_L:
225 *high_addr = P_REG_UIX66_25G_100G_U;
226 return true;
227 case P_REG_TX_CAPTURE_L:
228 *high_addr = P_REG_TX_CAPTURE_U;
229 return true;
230 case P_REG_RX_CAPTURE_L:
231 *high_addr = P_REG_RX_CAPTURE_U;
232 return true;
233 case P_REG_TX_TIMER_INC_PRE_L:
234 *high_addr = P_REG_TX_TIMER_INC_PRE_U;
235 return true;
236 case P_REG_RX_TIMER_INC_PRE_L:
237 *high_addr = P_REG_RX_TIMER_INC_PRE_U;
238 return true;
239 default:
240 return false;
241 }
242 }
243
244 /**
245 * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
246 * @low_addr: the low address to check
247 * @high_addr: on return, contains the high address of the 40bit value
248 *
249 * Checks if the provided low address is one of the known 40bit PHY values
250 * split into two registers with the lower 8 bits in the low register and the
251 * upper 32 bits in the high register. If it is, return the appropriate high
252 * register offset to use.
253 */
254 static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
255 {
256 switch (low_addr) {
257 case P_REG_TIMETUS_L:
258 *high_addr = P_REG_TIMETUS_U;
259 return true;
260 case P_REG_PAR_RX_TUS_L:
261 *high_addr = P_REG_PAR_RX_TUS_U;
262 return true;
263 case P_REG_PAR_TX_TUS_L:
264 *high_addr = P_REG_PAR_TX_TUS_U;
265 return true;
266 case P_REG_PCS_RX_TUS_L:
267 *high_addr = P_REG_PCS_RX_TUS_U;
268 return true;
269 case P_REG_PCS_TX_TUS_L:
270 *high_addr = P_REG_PCS_TX_TUS_U;
271 return true;
272 case P_REG_DESK_PAR_RX_TUS_L:
273 *high_addr = P_REG_DESK_PAR_RX_TUS_U;
274 return true;
275 case P_REG_DESK_PAR_TX_TUS_L:
276 *high_addr = P_REG_DESK_PAR_TX_TUS_U;
277 return true;
278 case P_REG_DESK_PCS_RX_TUS_L:
279 *high_addr = P_REG_DESK_PCS_RX_TUS_U;
280 return true;
281 case P_REG_DESK_PCS_TX_TUS_L:
282 *high_addr = P_REG_DESK_PCS_TX_TUS_U;
283 return true;
284 default:
285 return false;
286 }
287 }
288
289 /**
290 * ice_read_phy_reg_e822 - Read a PHY register
291 * @hw: pointer to the HW struct
292 * @port: PHY port to read from
293 * @offset: PHY register offset to read
294 * @val: on return, the contents read from the PHY
295 *
296 * Read a PHY register for the given port over the device sideband queue.
297 */
298 int
299 ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
300 {
301 struct ice_sbq_msg_input msg = {0};
302 int err;
303
304 ice_fill_phy_msg_e822(&msg, port, offset);
305 msg.opcode = ice_sbq_msg_rd;
306
307 err = ice_sbq_rw_reg(hw, &msg);
308 if (err) {
309 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
310 err);
311 return err;
312 }
313
314 *val = msg.data;
315
316 return 0;
317 }
318
319 /**
320 * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
321 * @hw: pointer to the HW struct
322 * @port: PHY port to read from
323 * @low_addr: offset of the lower register to read from
324 * @val: on return, the contents of the 64bit value from the PHY registers
325 *
326 * Reads the two registers associated with a 64bit value and returns it in the
327 * val pointer. The offset always specifies the lower register offset to use.
328 * The high offset is looked up. This function only operates on registers
329 * known to be two parts of a 64bit value.
330 */
331 static int
332 ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
333 {
334 u32 low, high;
335 u16 high_addr;
336 int err;
337
338 /* Only operate on registers known to be split into two 32bit
339 * registers.
340 */
341 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
342 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
343 low_addr);
344 return -EINVAL;
345 }
346
347 err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
348 if (err) {
349 		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
350 low_addr, err);
351 return err;
352 }
353
354 err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
355 if (err) {
356 		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
357 high_addr, err);
358 return err;
359 }
360
361 *val = (u64)high << 32 | low;
362
363 return 0;
364 }
365
366 /**
367 * ice_write_phy_reg_e822 - Write a PHY register
368 * @hw: pointer to the HW struct
369 * @port: PHY port to write to
370 * @offset: PHY register offset to write
371 * @val: The value to write to the register
372 *
373 * Write a PHY register for the given port over the device sideband queue.
374 */
375 int
376 ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
377 {
378 struct ice_sbq_msg_input msg = {0};
379 int err;
380
381 ice_fill_phy_msg_e822(&msg, port, offset);
382 msg.opcode = ice_sbq_msg_wr;
383 msg.data = val;
384
385 err = ice_sbq_rw_reg(hw, &msg);
386 if (err) {
387 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
388 err);
389 return err;
390 }
391
392 return 0;
393 }
394
395 /**
396 * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
397 * @hw: pointer to the HW struct
398 * @port: port to write to
399 * @low_addr: offset of the low register
400 * @val: 40b value to write
401 *
402 * Write the provided 40b value to the two associated registers by splitting
403 * it up into two chunks, the lower 8 bits and the upper 32 bits.
404 */
405 static int
406 ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
407 {
408 u32 low, high;
409 u16 high_addr;
410 int err;
411
412 /* Only operate on registers known to be split into a lower 8 bit
413 * register and an upper 32 bit register.
414 */
415 if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
416 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
417 low_addr);
418 return -EINVAL;
419 }
420
421 low = (u32)(val & P_REG_40B_LOW_M);
422 high = (u32)(val >> P_REG_40B_HIGH_S);
423
424 err = ice_write_phy_reg_e822(hw, port, low_addr, low);
425 if (err) {
426 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
427 low_addr, err);
428 return err;
429 }
430
431 err = ice_write_phy_reg_e822(hw, port, high_addr, high);
432 if (err) {
433 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
434 high_addr, err);
435 return err;
436 }
437
438 return 0;
439 }
440
441 /**
442 * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
443 * @hw: pointer to the HW struct
444 * @port: PHY port to read from
445 * @low_addr: offset of the lower register to read from
446 * @val: the contents of the 64bit value to write to PHY
447 *
448 * Write the 64bit value to the two associated 32bit PHY registers. The offset
449 * is always specified as the lower register, and the high address is looked
450 * up. This function only operates on registers known to be two parts of
451 * a 64bit value.
452 */
453 static int
454 ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
455 {
456 u32 low, high;
457 u16 high_addr;
458 int err;
459
460 /* Only operate on registers known to be split into two 32bit
461 * registers.
462 */
463 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
464 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
465 low_addr);
466 return -EINVAL;
467 }
468
469 low = lower_32_bits(val);
470 high = upper_32_bits(val);
471
472 err = ice_write_phy_reg_e822(hw, port, low_addr, low);
473 if (err) {
474 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
475 low_addr, err);
476 return err;
477 }
478
479 err = ice_write_phy_reg_e822(hw, port, high_addr, high);
480 if (err) {
481 		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
482 high_addr, err);
483 return err;
484 }
485
486 return 0;
487 }
488
489 /**
490 * ice_fill_quad_msg_e822 - Fill message data for quad register access
491 * @msg: the PHY message buffer to fill in
492 * @quad: the quad to access
493 * @offset: the register offset
494 *
495 * Fill a message buffer for accessing a register in a quad shared between
496 * multiple PHYs.
497 */
498 static void
499 ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
500 {
501 u32 addr;
502
503 msg->dest_dev = rmn_0;
504
505 if ((quad % ICE_NUM_QUAD_TYPE) == 0)
506 addr = Q_0_BASE + offset;
507 else
508 addr = Q_1_BASE + offset;
509
510 msg->msg_addr_low = lower_16_bits(addr);
511 msg->msg_addr_high = upper_16_bits(addr);
512 }
513
514 /**
515 * ice_read_quad_reg_e822 - Read a PHY quad register
516 * @hw: pointer to the HW struct
517 * @quad: quad to read from
518 * @offset: quad register offset to read
519 * @val: on return, the contents read from the quad
520 *
521 * Read a quad register over the device sideband queue. Quad registers are
522 * shared between multiple PHYs.
523 */
524 int
525 ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
526 {
527 struct ice_sbq_msg_input msg = {0};
528 int err;
529
530 if (quad >= ICE_MAX_QUAD)
531 return -EINVAL;
532
533 ice_fill_quad_msg_e822(&msg, quad, offset);
534 msg.opcode = ice_sbq_msg_rd;
535
536 err = ice_sbq_rw_reg(hw, &msg);
537 if (err) {
538 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
539 err);
540 return err;
541 }
542
543 *val = msg.data;
544
545 return 0;
546 }
547
548 /**
549 * ice_write_quad_reg_e822 - Write a PHY quad register
550 * @hw: pointer to the HW struct
551 * @quad: quad to write to
552 * @offset: quad register offset to write
553 * @val: The value to write to the register
554 *
555 * Write a quad register over the device sideband queue. Quad registers are
556 * shared between multiple PHYs.
557 */
558 int
559 ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
560 {
561 struct ice_sbq_msg_input msg = {0};
562 int err;
563
564 if (quad >= ICE_MAX_QUAD)
565 return -EINVAL;
566
567 ice_fill_quad_msg_e822(&msg, quad, offset);
568 msg.opcode = ice_sbq_msg_wr;
569 msg.data = val;
570
571 err = ice_sbq_rw_reg(hw, &msg);
572 if (err) {
573 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
574 err);
575 return err;
576 }
577
578 return 0;
579 }
580
581 /**
582 * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
583 * @hw: pointer to the HW struct
584 * @quad: the quad to read from
585 * @idx: the timestamp index to read
586 * @tstamp: on return, the 40bit timestamp value
587 *
588 * Read a 40bit timestamp value out of the two associated registers in the
589 * quad memory block that is shared between the internal PHYs of the E822
590 * family of devices.
591 */
592 static int
593 ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
594 {
595 u16 lo_addr, hi_addr;
596 u32 lo, hi;
597 int err;
598
599 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
600 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
601
602 err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
603 if (err) {
604 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
605 err);
606 return err;
607 }
608
609 err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
610 if (err) {
611 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
612 err);
613 return err;
614 }
615
616 /* For E822 based internal PHYs, the timestamp is reported with the
617 * lower 8 bits in the low register, and the upper 32 bits in the high
618 * register.
619 */
620 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
621
622 return 0;
623 }
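
/* Worked example (illustrative register values): if the high register reads
 * 0x12345678 and the low register reads 0x9A, the reconstruction above
 * yields ((u64)0x12345678 << 8) | 0x9A = 0x123456789A, i.e. a 40bit
 * timestamp with the upper 32 bits from the high register and the lower
 * 8 bits (after masking with TS_PHY_LOW_M) from the low register.
 */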
624
625 /**
626 * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
627 * @hw: pointer to the HW struct
628 * @quad: the quad to read from
629 * @idx: the timestamp index to reset
630 *
631 * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
632 * shared between the internal PHYs on the E822 devices.
633 */
634 static int
635 ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
636 {
637 u16 lo_addr, hi_addr;
638 int err;
639
640 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
641 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
642
643 err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
644 if (err) {
645 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
646 err);
647 return err;
648 }
649
650 err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
651 if (err) {
652 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
653 err);
654 return err;
655 }
656
657 return 0;
658 }
659
660 /**
661 * ice_read_cgu_reg_e822 - Read a CGU register
662 * @hw: pointer to the HW struct
663 * @addr: Register address to read
664 * @val: storage for register value read
665 *
666 * Read the contents of a register of the Clock Generation Unit. Only
667 * applicable to E822 devices.
668 */
669 static int
670 ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
671 {
672 struct ice_sbq_msg_input cgu_msg;
673 int err;
674
675 cgu_msg.opcode = ice_sbq_msg_rd;
676 cgu_msg.dest_dev = cgu;
677 cgu_msg.msg_addr_low = addr;
678 cgu_msg.msg_addr_high = 0x0;
679
680 err = ice_sbq_rw_reg(hw, &cgu_msg);
681 if (err) {
682 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
683 addr, err);
684 return err;
685 }
686
687 *val = cgu_msg.data;
688
689 return err;
690 }
691
692 /**
693 * ice_write_cgu_reg_e822 - Write a CGU register
694 * @hw: pointer to the HW struct
695 * @addr: Register address to write
696 * @val: value to write into the register
697 *
698 * Write the specified value to a register of the Clock Generation Unit. Only
699 * applicable to E822 devices.
700 */
701 static int
702 ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
703 {
704 struct ice_sbq_msg_input cgu_msg;
705 int err;
706
707 cgu_msg.opcode = ice_sbq_msg_wr;
708 cgu_msg.dest_dev = cgu;
709 cgu_msg.msg_addr_low = addr;
710 cgu_msg.msg_addr_high = 0x0;
711 cgu_msg.data = val;
712
713 err = ice_sbq_rw_reg(hw, &cgu_msg);
714 if (err) {
715 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
716 addr, err);
717 return err;
718 }
719
720 return err;
721 }
722
723 /**
724 * ice_clk_freq_str - Convert time_ref_freq to string
725 * @clk_freq: Clock frequency
726 *
727 * Convert the specified TIME_REF clock frequency to a string.
728 */
729 static const char *ice_clk_freq_str(u8 clk_freq)
730 {
731 switch ((enum ice_time_ref_freq)clk_freq) {
732 case ICE_TIME_REF_FREQ_25_000:
733 return "25 MHz";
734 case ICE_TIME_REF_FREQ_122_880:
735 return "122.88 MHz";
736 case ICE_TIME_REF_FREQ_125_000:
737 return "125 MHz";
738 case ICE_TIME_REF_FREQ_153_600:
739 return "153.6 MHz";
740 case ICE_TIME_REF_FREQ_156_250:
741 return "156.25 MHz";
742 case ICE_TIME_REF_FREQ_245_760:
743 return "245.76 MHz";
744 default:
745 return "Unknown";
746 }
747 }
748
749 /**
750 * ice_clk_src_str - Convert time_ref_src to string
751 * @clk_src: Clock source
752 *
753 * Convert the specified clock source to its string name.
754 */
755 static const char *ice_clk_src_str(u8 clk_src)
756 {
757 switch ((enum ice_clk_src)clk_src) {
758 case ICE_CLK_SRC_TCX0:
759 return "TCX0";
760 case ICE_CLK_SRC_TIME_REF:
761 return "TIME_REF";
762 default:
763 return "Unknown";
764 }
765 }
766
767 /**
768 * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
769 * @hw: pointer to the HW struct
770 * @clk_freq: Clock frequency to program
771 * @clk_src: Clock source to select (TIME_REF, or TCX0)
772 *
773 * Configure the Clock Generation Unit with the desired clock frequency and
774 * time reference, enabling the PLL which drives the PTP hardware clock.
775 */
776 static int
777 ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
778 enum ice_clk_src clk_src)
779 {
780 union tspll_ro_bwm_lf bwm_lf;
781 union nac_cgu_dword19 dw19;
782 union nac_cgu_dword22 dw22;
783 union nac_cgu_dword24 dw24;
784 union nac_cgu_dword9 dw9;
785 int err;
786
787 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
788 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
789 clk_freq);
790 return -EINVAL;
791 }
792
793 if (clk_src >= NUM_ICE_CLK_SRC) {
794 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
795 clk_src);
796 return -EINVAL;
797 }
798
799 if (clk_src == ICE_CLK_SRC_TCX0 &&
800 clk_freq != ICE_TIME_REF_FREQ_25_000) {
801 dev_warn(ice_hw_to_dev(hw),
802 "TCX0 only supports 25 MHz frequency\n");
803 return -EINVAL;
804 }
805
806 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
807 if (err)
808 return err;
809
810 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
811 if (err)
812 return err;
813
814 err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
815 if (err)
816 return err;
817
818 /* Log the current clock configuration */
819 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
820 dw24.field.ts_pll_enable ? "enabled" : "disabled",
821 ice_clk_src_str(dw24.field.time_ref_sel),
822 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
823 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
824
825 /* Disable the PLL before changing the clock source or frequency */
826 if (dw24.field.ts_pll_enable) {
827 dw24.field.ts_pll_enable = 0;
828
829 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
830 if (err)
831 return err;
832 }
833
834 /* Set the frequency */
835 dw9.field.time_ref_freq_sel = clk_freq;
836 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
837 if (err)
838 return err;
839
840 /* Configure the TS PLL feedback divisor */
841 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
842 if (err)
843 return err;
844
845 dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
846 dw19.field.tspll_ndivratio = 1;
847
848 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
849 if (err)
850 return err;
851
852 /* Configure the TS PLL post divisor */
853 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
854 if (err)
855 return err;
856
857 dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
858 dw22.field.time1588clk_sel_div2 = 0;
859
860 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
861 if (err)
862 return err;
863
864 /* Configure the TS PLL pre divisor and clock source */
865 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
866 if (err)
867 return err;
868
869 dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
870 dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
871 dw24.field.time_ref_sel = clk_src;
872
873 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
874 if (err)
875 return err;
876
877 /* Finally, enable the PLL */
878 dw24.field.ts_pll_enable = 1;
879
880 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
881 if (err)
882 return err;
883
884 /* Wait to verify if the PLL locks */
885 usleep_range(1000, 5000);
886
887 err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
888 if (err)
889 return err;
890
891 if (!bwm_lf.field.plllock_true_lock_cri) {
892 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
893 return -EBUSY;
894 }
895
896 /* Log the current clock configuration */
897 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
898 dw24.field.ts_pll_enable ? "enabled" : "disabled",
899 ice_clk_src_str(dw24.field.time_ref_sel),
900 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
901 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
902
903 return 0;
904 }
905
906 /**
907 * ice_init_cgu_e822 - Initialize CGU with settings from firmware
908 * @hw: pointer to the HW structure
909 *
910 * Initialize the Clock Generation Unit of the E822 device.
911 */
912 static int ice_init_cgu_e822(struct ice_hw *hw)
913 {
914 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
915 union tspll_cntr_bist_settings cntr_bist;
916 int err;
917
918 err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
919 &cntr_bist.val);
920 if (err)
921 return err;
922
923 /* Disable sticky lock detection so lock err reported is accurate */
924 cntr_bist.field.i_plllock_sel_0 = 0;
925 cntr_bist.field.i_plllock_sel_1 = 0;
926
927 err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
928 cntr_bist.val);
929 if (err)
930 return err;
931
932 /* Configure the CGU PLL using the parameters from the function
933 * capabilities.
934 */
935 err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
936 (enum ice_clk_src)ts_info->clk_src);
937 if (err)
938 return err;
939
940 return 0;
941 }
942
943 /**
944 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
945 * @hw: pointer to the HW struct
946 *
947 * Set the window length used for the vernier port calibration process.
948 */
949 static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
950 {
951 u8 port;
952
953 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
954 int err;
955
956 err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
957 PTP_VERNIER_WL);
958 if (err) {
959 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
960 port, err);
961 return err;
962 }
963 }
964
965 return 0;
966 }
967
968 /**
969 * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
970 * @hw: pointer to HW struct
971 *
972 * Perform PHC initialization steps specific to E822 devices.
973 */
974 static int ice_ptp_init_phc_e822(struct ice_hw *hw)
975 {
976 int err;
977 u32 regval;
978
979 /* Enable reading switch and PHY registers over the sideband queue */
980 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
981 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
982 regval = rd32(hw, PF_SB_REM_DEV_CTL);
983 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
984 PF_SB_REM_DEV_CTL_PHY0);
985 wr32(hw, PF_SB_REM_DEV_CTL, regval);
986
987 /* Initialize the Clock Generation Unit */
988 err = ice_init_cgu_e822(hw);
989 if (err)
990 return err;
991
992 /* Set window length for all the ports */
993 return ice_ptp_set_vernier_wl(hw);
994 }
995
996 /**
997 * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
998 * @hw: pointer to the HW struct
999 * @time: Time to initialize the PHY port clocks to
1000 *
1001 * Program the PHY port registers with a new initial time value. The port
1002 * clock will be initialized once the driver issues an INIT_TIME sync
1003 * command. The time value is the upper 32 bits of the PHY timer, usually in
1004 * units of nominal nanoseconds.
1005 */
1006 static int
1007 ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
1008 {
1009 u64 phy_time;
1010 u8 port;
1011 int err;
1012
1013 /* The time represents the upper 32 bits of the PHY timer, so we need
1014 * to shift to account for this when programming.
1015 */
1016 phy_time = (u64)time << 32;
1017
1018 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1019 /* Tx case */
1020 err = ice_write_64b_phy_reg_e822(hw, port,
1021 P_REG_TX_TIMER_INC_PRE_L,
1022 phy_time);
1023 if (err)
1024 goto exit_err;
1025
1026 /* Rx case */
1027 err = ice_write_64b_phy_reg_e822(hw, port,
1028 P_REG_RX_TIMER_INC_PRE_L,
1029 phy_time);
1030 if (err)
1031 goto exit_err;
1032 }
1033
1034 return 0;
1035
1036 exit_err:
1037 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1038 port, err);
1039
1040 return err;
1041 }
1042
1043 /**
1044 * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
1045 * @hw: pointer to HW struct
1046 * @port: Port number to be programmed
1047 * @time: time in cycles to adjust the port Tx and Rx clocks
1048 *
1049 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1050 * registers. The atomic adjustment won't be completed until the driver issues
1051 * an ADJ_TIME command.
1052 *
1053 * Note that time is not in units of nanoseconds. It is in clock time
1054 * including the lower sub-nanosecond portion of the port timer.
1055 *
1056 * Negative adjustments are supported using 2s complement arithmetic.
1057 */
1058 int
1059 ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
1060 {
1061 u32 l_time, u_time;
1062 int err;
1063
1064 l_time = lower_32_bits(time);
1065 u_time = upper_32_bits(time);
1066
1067 /* Tx case */
1068 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1069 l_time);
1070 if (err)
1071 goto exit_err;
1072
1073 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1074 u_time);
1075 if (err)
1076 goto exit_err;
1077
1078 /* Rx case */
1079 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1080 l_time);
1081 if (err)
1082 goto exit_err;
1083
1084 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1085 u_time);
1086 if (err)
1087 goto exit_err;
1088
1089 return 0;
1090
1091 exit_err:
1092 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1093 port, err);
1094 return err;
1095 }
1096
1097 /**
1098 * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
1099 * @hw: pointer to HW struct
1100 * @adj: adjustment in nanoseconds
1101 *
1102 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1103 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1104 * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
1105 */
1106 static int
1107 ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
1108 {
1109 s64 cycles;
1110 u8 port;
1111
1112 /* The port clock supports adjustment of the sub-nanosecond portion of
1113 * the clock. We shift the provided adjustment in nanoseconds to
1114 * calculate the appropriate adjustment to program into the PHY ports.
1115 */
1116 if (adj > 0)
1117 cycles = (s64)adj << 32;
1118 else
1119 cycles = -(((s64)-adj) << 32);
1120
1121 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1122 int err;
1123
1124 err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
1125 if (err)
1126 return err;
1127 }
1128
1129 return 0;
1130 }
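
/* Worked example (illustrative): the port timers carry a 32 bit
 * sub-nanosecond portion in their low half, so a +5 ns adjustment is staged
 * as 5 << 32 = 0x500000000 cycles, while a -5 ns adjustment is staged as
 * the two's complement of that value. ice_ptp_prep_port_adj_e822() then
 * splits the staged value into its upper and lower 32 bit halves.
 */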
1131
1132 /**
1133 * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
1134 * @hw: pointer to HW struct
1135 * @incval: new increment value to prepare
1136 *
1137 * Prepare each of the PHY ports for a new increment value by programming the
1138 * port's TIMETUS registers. The new increment value will be updated after
1139 * issuing an INIT_INCVAL command.
1140 */
1141 static int
1142 ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
1143 {
1144 int err;
1145 u8 port;
1146
1147 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1148 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
1149 incval);
1150 if (err)
1151 goto exit_err;
1152 }
1153
1154 return 0;
1155
1156 exit_err:
1157 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1158 port, err);
1159
1160 return err;
1161 }
1162
1163 /**
1164 * ice_ptp_read_port_capture - Read a port's local time capture
1165 * @hw: pointer to HW struct
1166 * @port: Port number to read
1167 * @tx_ts: on return, the Tx port time capture
1168 * @rx_ts: on return, the Rx port time capture
1169 *
1170 * Read the port's Tx and Rx local time capture values.
1171 *
1172 * Note this has no equivalent for the E810 devices.
1173 */
1174 static int
1175 ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1176 {
1177 int err;
1178
1179 /* Tx case */
1180 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1181 if (err) {
1182 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1183 err);
1184 return err;
1185 }
1186
1187 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1188 (unsigned long long)*tx_ts);
1189
1190 /* Rx case */
1191 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1192 if (err) {
1193 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1194 err);
1195 return err;
1196 }
1197
1198 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1199 (unsigned long long)*rx_ts);
1200
1201 return 0;
1202 }
1203
1204 /**
1205 * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
1206 * @hw: pointer to HW struct
1207  * @port: the port to send the command to
1208 * @cmd: Command to be sent to the port
1209 *
1210 * Prepare the requested port for an upcoming timer sync command.
1211 *
1212 * Do not use this function directly. If you want to configure exactly one
1213 * port, use ice_ptp_one_port_cmd() instead.
1214 */
1215 static int
1216 ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
1217 {
1218 u32 cmd_val, val;
1219 u8 tmr_idx;
1220 int err;
1221
1222 tmr_idx = ice_get_ptp_src_clock_index(hw);
1223 cmd_val = tmr_idx << SEL_PHY_SRC;
1224 switch (cmd) {
1225 case INIT_TIME:
1226 cmd_val |= PHY_CMD_INIT_TIME;
1227 break;
1228 case INIT_INCVAL:
1229 cmd_val |= PHY_CMD_INIT_INCVAL;
1230 break;
1231 case ADJ_TIME:
1232 cmd_val |= PHY_CMD_ADJ_TIME;
1233 break;
1234 case READ_TIME:
1235 cmd_val |= PHY_CMD_READ_TIME;
1236 break;
1237 case ADJ_TIME_AT_TIME:
1238 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1239 break;
1240 case ICE_PTP_NOP:
1241 break;
1242 }
1243
1244 /* Tx case */
1245 /* Read, modify, write */
1246 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
1247 if (err) {
1248 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1249 err);
1250 return err;
1251 }
1252
1253 /* Modify necessary bits only and perform write */
1254 val &= ~TS_CMD_MASK;
1255 val |= cmd_val;
1256
1257 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
1258 if (err) {
1259 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1260 err);
1261 return err;
1262 }
1263
1264 /* Rx case */
1265 /* Read, modify, write */
1266 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
1267 if (err) {
1268 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1269 err);
1270 return err;
1271 }
1272
1273 /* Modify necessary bits only and perform write */
1274 val &= ~TS_CMD_MASK;
1275 val |= cmd_val;
1276
1277 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
1278 if (err) {
1279 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1280 err);
1281 return err;
1282 }
1283
1284 return 0;
1285 }
1286
1287 /**
1288 * ice_ptp_one_port_cmd - Prepare one port for a timer command
1289 * @hw: pointer to the HW struct
1290 * @configured_port: the port to configure with configured_cmd
1291 * @configured_cmd: timer command to prepare on the configured_port
1292 *
1293 * Prepare the configured_port for the configured_cmd, and prepare all other
1294 * ports for ICE_PTP_NOP. This causes the configured_port to execute the
1295 * desired command while all other ports perform no operation.
1296 */
1297 static int
1298 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
1299 enum ice_ptp_tmr_cmd configured_cmd)
1300 {
1301 u8 port;
1302
1303 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1304 enum ice_ptp_tmr_cmd cmd;
1305 int err;
1306
1307 if (port == configured_port)
1308 cmd = configured_cmd;
1309 else
1310 cmd = ICE_PTP_NOP;
1311
1312 err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
1313 if (err)
1314 return err;
1315 }
1316
1317 return 0;
1318 }
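
/* Illustrative usage sketch (assumed caller, not shown in this excerpt):
 * to act on exactly one port, stage the command on that port and NOP on
 * the rest, then trigger the sync:
 *
 *	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
 *	if (!err)
 *		ice_ptp_exec_tmr_cmd(hw);
 *
 * Only the selected port reacts when the command executes.
 */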
1319
1320 /**
1321 * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
1322 * @hw: pointer to the HW struct
1323 * @cmd: timer command to prepare
1324 *
1325 * Prepare all ports connected to this device for an upcoming timer sync
1326 * command.
1327 */
1328 static int
1329 ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1330 {
1331 u8 port;
1332
1333 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1334 int err;
1335
1336 err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
1337 if (err)
1338 return err;
1339 }
1340
1341 return 0;
1342 }
1343
1344 /* E822 Vernier calibration functions
1345 *
1346 * The following functions are used as part of the vernier calibration of
1347 * a port. This calibration increases the precision of the timestamps on the
1348 * port.
1349 */
1350
1351 /**
1352 * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
1353 * @hw: pointer to HW struct
1354 * @port: the port to read from
1355 * @link_out: if non-NULL, holds link speed on success
1356 * @fec_out: if non-NULL, holds FEC algorithm on success
1357 *
1358 * Read the serdes data for the PHY port and extract the link speed and FEC
1359 * algorithm.
1360 */
1361 static int
1362 ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
1363 enum ice_ptp_link_spd *link_out,
1364 enum ice_ptp_fec_mode *fec_out)
1365 {
1366 enum ice_ptp_link_spd link;
1367 enum ice_ptp_fec_mode fec;
1368 u32 serdes;
1369 int err;
1370
1371 err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
1372 if (err) {
1373 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1374 return err;
1375 }
1376
1377 /* Determine the FEC algorithm */
1378 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1379
1380 serdes &= P_REG_LINK_SPEED_SERDES_M;
1381
1382 /* Determine the link speed */
1383 if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1384 switch (serdes) {
1385 case ICE_PTP_SERDES_25G:
1386 link = ICE_PTP_LNK_SPD_25G_RS;
1387 break;
1388 case ICE_PTP_SERDES_50G:
1389 link = ICE_PTP_LNK_SPD_50G_RS;
1390 break;
1391 case ICE_PTP_SERDES_100G:
1392 link = ICE_PTP_LNK_SPD_100G_RS;
1393 break;
1394 default:
1395 return -EIO;
1396 }
1397 } else {
1398 switch (serdes) {
1399 case ICE_PTP_SERDES_1G:
1400 link = ICE_PTP_LNK_SPD_1G;
1401 break;
1402 case ICE_PTP_SERDES_10G:
1403 link = ICE_PTP_LNK_SPD_10G;
1404 break;
1405 case ICE_PTP_SERDES_25G:
1406 link = ICE_PTP_LNK_SPD_25G;
1407 break;
1408 case ICE_PTP_SERDES_40G:
1409 link = ICE_PTP_LNK_SPD_40G;
1410 break;
1411 case ICE_PTP_SERDES_50G:
1412 link = ICE_PTP_LNK_SPD_50G;
1413 break;
1414 default:
1415 return -EIO;
1416 }
1417 }
1418
1419 if (link_out)
1420 *link_out = link;
1421 if (fec_out)
1422 *fec_out = fec;
1423
1424 return 0;
1425 }
1426
1427 /**
1428 * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
1429 * @hw: pointer to HW struct
1430  * @port: the port to configure the quad for
1431 */
1432 static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
1433 {
1434 enum ice_ptp_link_spd link_spd;
1435 int err;
1436 u32 val;
1437 u8 quad;
1438
1439 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
1440 if (err) {
1441 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1442 err);
1443 return;
1444 }
1445
1446 quad = port / ICE_PORTS_PER_QUAD;
1447
1448 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1449 if (err) {
1450 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GBL_CFG, err %d\n",
1451 err);
1452 return;
1453 }
1454
1455 if (link_spd >= ICE_PTP_LNK_SPD_40G)
1456 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1457 else
1458 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1459
1460 err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1461 if (err) {
1462 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1463 err);
1464 return;
1465 }
1466 }
1467
1468 /**
1469 * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
1470 * @hw: pointer to the HW structure
1471 * @port: the port to configure
1472 *
1473  * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1474 * hardware clock time units (TUs). That is, determine the number of TUs per
1475 * serdes unit interval, and program the UIX registers with this conversion.
1476 *
1477 * This conversion is used as part of the calibration process when determining
1478 * the additional error of a timestamp vs the real time of transmission or
1479 * receipt of the packet.
1480 *
1481 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1482 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1483 *
1484 * To calculate the conversion ratio, we use the following facts:
1485 *
1486 * a) the clock frequency in Hz (cycles per second)
1487 * b) the number of TUs per cycle (the increment value of the clock)
1488 * c) 1 second per 1 billion nanoseconds
1489 * d) the duration of 66 UIs in nanoseconds
1490 *
1491 * Given these facts, we can use the following table to work out what ratios
1492 * to multiply in order to get the number of TUs per 66 UIs:
1493 *
1494 * cycles | 1 second | incval (TUs) | nanoseconds
1495 * -------+--------------+--------------+-------------
1496 * second | 1 billion ns | cycle | 66 UIs
1497 *
1498 * To perform the multiplication using integers without too much loss of
1499  * precision, we can use the following equation:
1500 *
1501  *	(freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1502 *
1503 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1504 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1505 *
1506 * The increment value has a maximum expected range of about 34 bits, while
1507 * the frequency value is about 29 bits. Multiplying these values shouldn't
1508 * overflow the 64 bits. However, we must then further multiply them again by
1509 * the Serdes unit interval duration. To avoid overflow here, we split the
1510 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1511 * a divide by 390,625,000. This does lose some precision, but avoids
1512 * miscalculation due to arithmetic overflow.
1513 */
1514 static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
1515 {
1516 u64 cur_freq, clk_incval, tu_per_sec, uix;
1517 int err;
1518
1519 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1520 clk_incval = ice_ptp_read_src_incval(hw);
1521
1522 /* Calculate TUs per second divided by 256 */
1523 tu_per_sec = (cur_freq * clk_incval) >> 8;
1524
1525 #define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1526 #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1527
1528 /* Program the 10Gb/40Gb conversion ratio */
1529 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1530
1531 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
1532 uix);
1533 if (err) {
1534 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1535 err);
1536 return err;
1537 }
1538
1539 /* Program the 25Gb/100Gb conversion ratio */
1540 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1541
1542 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
1543 uix);
1544 if (err) {
1545 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1546 err);
1547 return err;
1548 }
1549
1550 return 0;
1551 }
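
/* Worked example (illustrative, using standard serdes line rates): a
 * 10GBASE-R lane runs at 10.3125 GBd, so one UI is ~96.97 ps and 6600 UI
 * is 640 ns, which is where LINE_UI_10G_40G comes from; a 25G/100G lane at
 * 25.78125 GBd gives 6600 UI = 256 ns (LINE_UI_25G_100G). The split of the
 * divide by 1e11 into ">> 8" and "/ 390625000" is exact in the sense that
 * 256 * 390,625,000 = 100,000,000,000, at the cost of truncating the low
 * 8 bits early.
 */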
1552
1553 /**
1554 * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
1555 * @hw: pointer to the HW struct
1556 * @port: port to configure
1557 *
1558 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1559 * timestamp calibration process. This depends on the link speed, as the PHY
1560 * uses different markers depending on the speed.
1561 *
1562 * 1Gb/10Gb/25Gb:
1563 * - Tx/Rx PAR/PCS markers
1564 *
1565 * 25Gb RS:
1566 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1567 *
1568 * 40Gb/50Gb:
1569 * - Tx/Rx PAR/PCS markers
1570 * - Rx Deskew PAR/PCS markers
1571 *
1572  * 50G RS and 100G RS:
1573 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1574 * - Rx Deskew PAR/PCS markers
1575 * - Tx PAR/PCS markers
1576 *
1577 * To calculate the conversion, we use the PHC clock frequency (cycles per
1578 * second), the increment value (TUs per cycle), and the related PHY clock
1579 * frequency to calculate the TUs per unit of the PHY link clock. The
1580 * following table shows how the units convert:
1581 *
1582 * cycles | TUs | second
1583 * -------+-------+--------
1584 * second | cycle | cycles
1585 *
1586 * For each conversion register, look up the appropriate frequency from the
1587 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1588 * this to the appropriate register, preparing hardware to perform timestamp
1589 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1590 * in order to calibrate for the internal PHY delays.
1591 *
1592 * Note that the increment value ranges up to ~34 bits, and the clock
1593 * frequency is ~29 bits, so multiplying them together should fit within the
1594 * 64 bit arithmetic.
1595 */
1596 static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
1597 {
1598 u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1599 enum ice_ptp_link_spd link_spd;
1600 enum ice_ptp_fec_mode fec_mode;
1601 int err;
1602
1603 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1604 if (err)
1605 return err;
1606
1607 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1608 clk_incval = ice_ptp_read_src_incval(hw);
1609
1610 /* Calculate TUs per cycle of the PHC clock */
1611 tu_per_sec = cur_freq * clk_incval;
1612
1613 /* For each PHY conversion register, look up the appropriate link
1614 * speed frequency and determine the TUs per that clock's cycle time.
1615 * Split this into a high and low value and then program the
1616 * appropriate register. If that link speed does not use the
1617 * associated register, write zeros to clear it instead.
1618 */
1619
1620 /* P_REG_PAR_TX_TUS */
1621 if (e822_vernier[link_spd].tx_par_clk)
1622 phy_tus = div_u64(tu_per_sec,
1623 e822_vernier[link_spd].tx_par_clk);
1624 else
1625 phy_tus = 0;
1626
1627 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
1628 phy_tus);
1629 if (err)
1630 return err;
1631
1632 /* P_REG_PAR_RX_TUS */
1633 if (e822_vernier[link_spd].rx_par_clk)
1634 phy_tus = div_u64(tu_per_sec,
1635 e822_vernier[link_spd].rx_par_clk);
1636 else
1637 phy_tus = 0;
1638
1639 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
1640 phy_tus);
1641 if (err)
1642 return err;
1643
1644 /* P_REG_PCS_TX_TUS */
1645 if (e822_vernier[link_spd].tx_pcs_clk)
1646 phy_tus = div_u64(tu_per_sec,
1647 e822_vernier[link_spd].tx_pcs_clk);
1648 else
1649 phy_tus = 0;
1650
1651 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
1652 phy_tus);
1653 if (err)
1654 return err;
1655
1656 /* P_REG_PCS_RX_TUS */
1657 if (e822_vernier[link_spd].rx_pcs_clk)
1658 phy_tus = div_u64(tu_per_sec,
1659 e822_vernier[link_spd].rx_pcs_clk);
1660 else
1661 phy_tus = 0;
1662
1663 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
1664 phy_tus);
1665 if (err)
1666 return err;
1667
1668 /* P_REG_DESK_PAR_TX_TUS */
1669 if (e822_vernier[link_spd].tx_desk_rsgb_par)
1670 phy_tus = div_u64(tu_per_sec,
1671 e822_vernier[link_spd].tx_desk_rsgb_par);
1672 else
1673 phy_tus = 0;
1674
1675 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1676 phy_tus);
1677 if (err)
1678 return err;
1679
1680 /* P_REG_DESK_PAR_RX_TUS */
1681 if (e822_vernier[link_spd].rx_desk_rsgb_par)
1682 phy_tus = div_u64(tu_per_sec,
1683 e822_vernier[link_spd].rx_desk_rsgb_par);
1684 else
1685 phy_tus = 0;
1686
1687 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1688 phy_tus);
1689 if (err)
1690 return err;
1691
1692 /* P_REG_DESK_PCS_TX_TUS */
1693 if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1694 phy_tus = div_u64(tu_per_sec,
1695 e822_vernier[link_spd].tx_desk_rsgb_pcs);
1696 else
1697 phy_tus = 0;
1698
1699 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1700 phy_tus);
1701 if (err)
1702 return err;
1703
1704 /* P_REG_DESK_PCS_RX_TUS */
1705 if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1706 phy_tus = div_u64(tu_per_sec,
1707 e822_vernier[link_spd].rx_desk_rsgb_pcs);
1708 else
1709 phy_tus = 0;
1710
1711 return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1712 phy_tus);
1713 }
1714
1715 /**
1716  * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
1717 * @hw: pointer to the HW struct
1718 * @link_spd: the Link speed to calculate for
1719 *
1720 * Calculate the fixed offset due to known static latency data.
1721 */
1722 static u64
1723 ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1724 {
1725 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1726
1727 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1728 clk_incval = ice_ptp_read_src_incval(hw);
1729
1730 /* Calculate TUs per second */
1731 tu_per_sec = cur_freq * clk_incval;
1732
1733 /* Calculate number of TUs to add for the fixed Tx latency. Since the
1734 * latency measurement is in 1/100th of a nanosecond, we need to
1735 * multiply by tu_per_sec and then divide by 1e11. This calculation
1736 * overflows 64 bit integer arithmetic, so break it up into two
1737 * divisions by 1e4 first then by 1e7.
1738 */
1739 fixed_offset = div_u64(tu_per_sec, 10000);
1740 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1741 fixed_offset = div_u64(fixed_offset, 10000000);
1742
1743 return fixed_offset;
1744 }
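
/* Worked example (illustrative magnitudes): tu_per_sec is roughly
 * 1e9 ns/s * 2^32 TU/ns ~= 4.3e18, close to the 64 bit limit of ~1.8e19.
 * Multiplying that directly by a fixed delay in 1/100ths of a nanosecond
 * would overflow, but dividing by 1e4 first (~4.3e14) leaves plenty of
 * headroom for the multiply before the final divide by 1e7.
 */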
1745
1746 /**
1747 * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
1748 * @hw: pointer to the HW struct
1749 * @port: the PHY port to configure
1750 *
1751 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1752 * adjust Tx timestamps by. This is calculated by combining some known static
1753 * latency along with the Vernier offset computations done by hardware.
1754 *
1755 * This function must be called only after the offset registers are valid,
1756 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
1757 * has measured the offset.
1758 *
1759 * To avoid overflow, when calculating the offset based on the known static
1760 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1761 * the TUs per second up front. This avoids overflow while allowing
1762 * calculation of the adjustment using integer arithmetic.
1763 */
1764 static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
1765 {
1766 enum ice_ptp_link_spd link_spd;
1767 enum ice_ptp_fec_mode fec_mode;
1768 u64 total_offset, val;
1769 int err;
1770
1771 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1772 if (err)
1773 return err;
1774
1775 total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1776
1777 /* Read the first Vernier offset from the PHY register and add it to
1778 * the total offset.
1779 */
1780 if (link_spd == ICE_PTP_LNK_SPD_1G ||
1781 link_spd == ICE_PTP_LNK_SPD_10G ||
1782 link_spd == ICE_PTP_LNK_SPD_25G ||
1783 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1784 link_spd == ICE_PTP_LNK_SPD_40G ||
1785 link_spd == ICE_PTP_LNK_SPD_50G) {
1786 err = ice_read_64b_phy_reg_e822(hw, port,
1787 P_REG_PAR_PCS_TX_OFFSET_L,
1788 &val);
1789 if (err)
1790 return err;
1791
1792 total_offset += val;
1793 }
1794
1795 /* For Tx, we only need to use the second Vernier offset for
1796 * multi-lane link speeds with RS-FEC. The lanes will always be
1797 * aligned.
1798 */
1799 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1800 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1801 err = ice_read_64b_phy_reg_e822(hw, port,
1802 P_REG_PAR_TX_TIME_L,
1803 &val);
1804 if (err)
1805 return err;
1806
1807 total_offset += val;
1808 }
1809
1810 /* Now that the total offset has been calculated, program it to the
1811 * PHY and indicate that the Tx offset is ready. After this,
1812 * timestamps will be enabled.
1813 */
1814 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1815 total_offset);
1816 if (err)
1817 return err;
1818
1819 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1820 if (err)
1821 return err;
1822
1823 return 0;
1824 }
1825
1826 /**
1827 * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
1828 * @hw: pointer to the HW struct
1829 * @port: the PHY port to configure
1830 *
1831 * Calculate and program the fixed Tx offset, and indicate that the offset is
1832 * ready. This can be used when operating in bypass mode.
1833 */
1834 static int
1835 ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
1836 {
1837 enum ice_ptp_link_spd link_spd;
1838 enum ice_ptp_fec_mode fec_mode;
1839 u64 total_offset;
1840 int err;
1841
1842 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1843 if (err)
1844 return err;
1845
1846 total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1847
1848 /* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
1849 * register, then indicate that the Tx offset is ready. After this,
1850 * timestamps will be enabled.
1851 *
1852 * Note that this skips including the more precise offsets generated
1853 * by the Vernier calibration.
1854 */
1855 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1856 total_offset);
1857 if (err)
1858 return err;
1859
1860 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1861 if (err)
1862 return err;
1863
1864 return 0;
1865 }
1866
1867 /**
1868 * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
1869 * @hw: pointer to the HW struct
1870 * @port: the PHY port to adjust for
1871 * @link_spd: the current link speed of the PHY
1872 * @fec_mode: the current FEC mode of the PHY
1873 * @pmd_adj: on return, the amount to adjust the Rx total offset by
1874 *
1875 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
1876 * This varies by link speed and FEC mode. The value calculated accounts for
1877 * various delays caused when receiving a packet.
1878 */
1879 static int
1880 ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
1881 enum ice_ptp_link_spd link_spd,
1882 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
1883 {
1884 u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
1885 u8 pmd_align;
1886 u32 val;
1887 int err;
1888
1889 err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
1890 if (err) {
1891 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
1892 err);
1893 return err;
1894 }
1895
1896 pmd_align = (u8)val;
1897
1898 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1899 clk_incval = ice_ptp_read_src_incval(hw);
1900
1901 /* Calculate TUs per second */
1902 tu_per_sec = cur_freq * clk_incval;
1903
1904 	/* The PMD alignment adjustment measurement depends on the link speed,
1905 * and whether FEC is enabled. For each link speed, the alignment
1906 * adjustment is calculated by dividing a value by the length of
1907 * a Time Unit in nanoseconds.
1908 *
1909 	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
1910 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
1911 * 10G w/FEC: align * 0.1 * 32/33
1912 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
1913 * 25G w/FEC: align * 0.4 * 32/33
1914 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
1915 * 40G w/FEC: align * 0.1 * 32/33
1916 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
1917 * 50G w/FEC: align * 0.8 * 32/33
1918 *
1919 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
1920 *
1921 * To allow for calculating this value using integer arithmetic, we
1922 * instead start with the number of TUs per second, (inverse of the
1923 * length of a Time Unit in nanoseconds), multiply by a value based
1924 * on the PMD alignment register, and then divide by the right value
1925 * calculated based on the table above. To avoid integer overflow this
1926 * division is broken up into a step of dividing by 125 first.
1927 	 * division is broken up into a step of dividing by 125 first.
	 */
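	/* Sketch of how the integer form maps onto the table above. The
	 * divisor value is an assumption for illustration only: for 25G
	 * without RS-FEC and pmd_align = 20, the table gives an adjustment
	 * of 20 * 0.4 * 32/33 ns. The code instead computes
	 *
	 *   adj = ((tu_per_sec / 125) * 20) / pmd_adj_divisor
	 *
	 * so, if the table encodes exactly this scaling, pmd_adj_divisor
	 * for 25G would be 1e9 * 33 / (125 * 0.4 * 32) = 20,625,000, and
	 * the result is the same number of TUs with no intermediate
	 * overflow.
	 */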
1928 if (link_spd == ICE_PTP_LNK_SPD_1G) {
1929 if (pmd_align == 4)
1930 mult = 10;
1931 else
1932 mult = (pmd_align + 6) % 10;
1933 } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
1934 link_spd == ICE_PTP_LNK_SPD_25G ||
1935 link_spd == ICE_PTP_LNK_SPD_40G ||
1936 link_spd == ICE_PTP_LNK_SPD_50G) {
1937 /* If Clause 74 FEC, always calculate PMD adjust */
1938 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
1939 mult = pmd_align;
1940 else
1941 mult = 0;
1942 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1943 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1944 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1945 if (pmd_align < 17)
1946 mult = pmd_align + 40;
1947 else
1948 mult = pmd_align;
1949 } else {
1950 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
1951 link_spd);
1952 mult = 0;
1953 }
1954
1955 /* In some cases, there's no need to adjust for the PMD alignment */
1956 if (!mult) {
1957 *pmd_adj = 0;
1958 return 0;
1959 }
1960
1961 /* Calculate the adjustment by multiplying TUs per second by the
1962 * appropriate multiplier and divisor. To avoid overflow, we first
1963 * divide by 125, and then handle remaining divisor based on the link
1964 * speed pmd_adj_divisor value.
1965 */
1966 adj = div_u64(tu_per_sec, 125);
1967 adj *= mult;
1968 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
1969
1970 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
1971 * cycle count is necessary.
1972 */
1973 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
1974 u64 cycle_adj;
1975 u8 rx_cycle;
1976
1977 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
1978 &val);
1979 if (err) {
1980 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
1981 err);
1982 return err;
1983 }
1984
1985 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
1986 if (rx_cycle) {
1987 mult = (4 - rx_cycle) * 40;
1988
1989 cycle_adj = div_u64(tu_per_sec, 125);
1990 cycle_adj *= mult;
1991 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1992
1993 adj += cycle_adj;
1994 }
1995 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
1996 u64 cycle_adj;
1997 u8 rx_cycle;
1998
1999 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
2000 &val);
2001 if (err) {
2002 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
2003 err);
2004 return err;
2005 }
2006
2007 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
2008 if (rx_cycle) {
2009 mult = rx_cycle * 40;
2010
2011 cycle_adj = div_u64(tu_per_sec, 125);
2012 cycle_adj *= mult;
2013 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2014
2015 adj += cycle_adj;
2016 }
2017 }
2018
2019 /* Return the calculated adjustment */
2020 *pmd_adj = adj;
2021
2022 return 0;
2023 }
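
/* Illustrative example of the Rx cycle adjustment above (numbers chosen
 * for the sketch only): at 25G-RS with rx_cycle = 1 the code computes
 * mult = (4 - 1) * 40 = 120 and then scales it exactly like the PMD
 * alignment itself:
 *
 *   cycle_adj = ((tu_per_sec / 125) * 120) / pmd_adj_divisor
 *
 * The result is added on top of the alignment adjustment before the
 * total is returned to the caller.
 */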
2024
2025 /**
2026  * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
2027 * @hw: pointer to HW struct
2028 * @link_spd: The Link speed to calculate for
2029 *
2030 * Determine the fixed Rx latency for a given link speed.
2031 */
2032 static u64
2033 ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
2034 {
2035 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
2036
2037 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
2038 clk_incval = ice_ptp_read_src_incval(hw);
2039
2040 /* Calculate TUs per second */
2041 tu_per_sec = cur_freq * clk_incval;
2042
2043 /* Calculate number of TUs to add for the fixed Rx latency. Since the
2044 * latency measurement is in 1/100th of a nanosecond, we need to
2045 * multiply by tu_per_sec and then divide by 1e11. This calculation
2046 * overflows 64 bit integer arithmetic, so break it up into two
2047 * divisions by 1e4 first then by 1e7.
2048 */
2049 fixed_offset = div_u64(tu_per_sec, 10000);
2050 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2051 fixed_offset = div_u64(fixed_offset, 10000000);
2052
2053 return fixed_offset;
2054 }
2055
2056 /**
2057 * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
2058 * @hw: pointer to the HW struct
2059 * @port: the PHY port to configure
2060 *
2061 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2062 * adjust Rx timestamps by. This combines calculations from the Vernier offset
2063 * measurements taken in hardware with some data about known fixed delay as
2064 * well as adjusting for multi-lane alignment delay.
2065 *
2066 * This function must be called only after the offset registers are valid,
2067 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2068 * has measured the offset.
2069 *
2070 * To avoid overflow, when calculating the offset based on the known static
2071 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2072 * the TUs per second up front. This avoids overflow while allowing
2073 * calculation of the adjustment using integer arithmetic.
2074 */
2075 static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
2076 {
2077 enum ice_ptp_link_spd link_spd;
2078 enum ice_ptp_fec_mode fec_mode;
2079 u64 total_offset, pmd, val;
2080 int err;
2081
2082 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2083 if (err)
2084 return err;
2085
2086 total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2087
2088 /* Read the first Vernier offset from the PHY register and add it to
2089 * the total offset.
2090 */
2091 err = ice_read_64b_phy_reg_e822(hw, port,
2092 P_REG_PAR_PCS_RX_OFFSET_L,
2093 &val);
2094 if (err)
2095 return err;
2096
2097 total_offset += val;
2098
2099 /* For Rx, all multi-lane link speeds include a second Vernier
2100 * calibration, because the lanes might not be aligned.
2101 */
2102 if (link_spd == ICE_PTP_LNK_SPD_40G ||
2103 link_spd == ICE_PTP_LNK_SPD_50G ||
2104 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2105 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2106 err = ice_read_64b_phy_reg_e822(hw, port,
2107 P_REG_PAR_RX_TIME_L,
2108 &val);
2109 if (err)
2110 return err;
2111
2112 total_offset += val;
2113 }
2114
2115 /* In addition, Rx must account for the PMD alignment */
2116 err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
2117 if (err)
2118 return err;
2119
2120 /* For RS-FEC, this adjustment adds delay, but for other modes, it
2121 * subtracts delay.
2122 */
2123 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2124 total_offset += pmd;
2125 else
2126 total_offset -= pmd;
2127
2128 /* Now that the total offset has been calculated, program it to the
2129 * PHY and indicate that the Rx offset is ready. After this,
2130 * timestamps will be enabled.
2131 */
2132 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2133 total_offset);
2134 if (err)
2135 return err;
2136
2137 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2138 if (err)
2139 return err;
2140
2141 return 0;
2142 }
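
/* Putting the pieces of ice_phy_cfg_rx_offset_e822 together, the value
 * programmed into P_REG_TOTAL_RX_OFFSET_L is, in Time Units:
 *
 *   total = fixed_rx_offset
 *         + P_REG_PAR_PCS_RX_OFFSET_L   (first Vernier measurement)
 *         + P_REG_PAR_RX_TIME_L         (multi-lane link speeds only)
 *         +/- pmd_adj                   (+ for RS-FEC, - otherwise)
 */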
2143
2144 /**
2145 * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode
2146 * @hw: pointer to the HW struct
2147 * @port: the PHY port to configure
2148 *
2149 * Calculate and program the fixed Rx offset, and indicate that the offset is
2150 * ready. This can be used when operating in bypass mode.
2151 */
2152 static int
2153 ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
2154 {
2155 enum ice_ptp_link_spd link_spd;
2156 enum ice_ptp_fec_mode fec_mode;
2157 u64 total_offset;
2158 int err;
2159
2160 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2161 if (err)
2162 return err;
2163
2164 total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2165
2166 /* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L
2167 * register, then indicate that the Rx offset is ready. After this,
2168 * timestamps will be enabled.
2169 *
2170 * Note that this skips including the more precise offsets generated
2171 * by Vernier calibration.
2172 */
2173 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2174 total_offset);
2175 if (err)
2176 return err;
2177
2178 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2179 if (err)
2180 return err;
2181
2182 return 0;
2183 }
2184
2185 /**
2186 * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
2187 * @hw: pointer to the HW struct
2188 * @port: the PHY port to read
2189 * @phy_time: on return, the 64bit PHY timer value
2190 * @phc_time: on return, the lower 64bits of PHC time
2191 *
2192 * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
2193 * timer values.
2194 */
2195 static int
2196 ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
2197 u64 *phc_time)
2198 {
2199 u64 tx_time, rx_time;
2200 u32 zo, lo;
2201 u8 tmr_idx;
2202 int err;
2203
2204 tmr_idx = ice_get_ptp_src_clock_index(hw);
2205
2206 /* Prepare the PHC timer for a READ_TIME capture command */
2207 ice_ptp_src_cmd(hw, READ_TIME);
2208
2209 /* Prepare the PHY timer for a READ_TIME capture command */
2210 err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
2211 if (err)
2212 return err;
2213
2214 /* Issue the sync to start the READ_TIME capture */
2215 ice_ptp_exec_tmr_cmd(hw);
2216
2217 /* Read the captured PHC time from the shadow time registers */
2218 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2219 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2220 *phc_time = (u64)lo << 32 | zo;
2221
2222 /* Read the captured PHY time from the PHY shadow registers */
2223 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2224 if (err)
2225 return err;
2226
2227 /* If the PHY Tx and Rx timers don't match, log a warning message.
2228 * Note that this should not happen in normal circumstances since the
2229 * driver always programs them together.
2230 */
2231 if (tx_time != rx_time)
2232 dev_warn(ice_hw_to_dev(hw),
2233 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2234 port, (unsigned long long)tx_time,
2235 (unsigned long long)rx_time);
2236
2237 *phy_time = tx_time;
2238
2239 return 0;
2240 }
2241
2242 /**
2243 * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
2244 * @hw: pointer to the HW struct
2245 * @port: the PHY port to synchronize
2246 *
2247 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2248 * This is done by issuing a READ_TIME command which triggers a simultaneous
2249 * read of the PHY timer and PHC timer. Then we use the difference to
2250 * calculate an appropriate 2s complement addition to add to the PHY timer in
2251 * order to ensure it reads the same value as the primary PHC timer.
2252 */
2253 static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
2254 {
2255 u64 phc_time, phy_time, difference;
2256 int err;
2257
2258 if (!ice_ptp_lock(hw)) {
2259 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2260 return -EBUSY;
2261 }
2262
2263 err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2264 if (err)
2265 goto err_unlock;
2266
2267 /* Calculate the amount required to add to the port time in order for
2268 * it to match the PHC time.
2269 *
2270 * Note that the port adjustment is done using 2s complement
2271 * arithmetic. This is convenient since it means that we can simply
2272 * calculate the difference between the PHC time and the port time,
2273 * and it will be interpreted correctly.
2274 */
2275 difference = phc_time - phy_time;
2276
2277 err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
2278 if (err)
2279 goto err_unlock;
2280
2281 err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
2282 if (err)
2283 goto err_unlock;
2284
2285 /* Do not perform any action on the main timer */
2286 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2287
2288 /* Issue the sync to activate the time adjustment */
2289 ice_ptp_exec_tmr_cmd(hw);
2290
2291 /* Re-capture the timer values to flush the command registers and
2292 * verify that the time was properly adjusted.
2293 */
2294 err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2295 if (err)
2296 goto err_unlock;
2297
2298 dev_info(ice_hw_to_dev(hw),
2299 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2300 port, (unsigned long long)phy_time,
2301 (unsigned long long)phc_time);
2302
2303 ice_ptp_unlock(hw);
2304
2305 return 0;
2306
2307 err_unlock:
2308 ice_ptp_unlock(hw);
2309 return err;
2310 }
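
/* Worked example of the 2s complement adjustment above (values are
 * illustrative): if phc_time = 0x0000000000001000 and
 * phy_time = 0x0000000000004000, then difference = phc_time - phy_time
 * wraps to 0xFFFFFFFFFFFFD000. Cast to s64 and applied via ADJ_TIME,
 * this is interpreted as -0x3000 TUs, pulling the port timer back in
 * line with the PHC.
 */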
2311
2312 /**
2313 * ice_stop_phy_timer_e822 - Stop the PHY clock timer
2314 * @hw: pointer to the HW struct
2315 * @port: the PHY port to stop
2316 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2317 *
2318 * Stop the clock of a PHY port. This must be done as part of the flow to
2319 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2320 * initialized or when link speed changes.
2321 */
2322 int
2323 ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
2324 {
2325 int err;
2326 u32 val;
2327
2328 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
2329 if (err)
2330 return err;
2331
2332 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
2333 if (err)
2334 return err;
2335
2336 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2337 if (err)
2338 return err;
2339
2340 val &= ~P_REG_PS_START_M;
2341 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2342 if (err)
2343 return err;
2344
2345 val &= ~P_REG_PS_ENA_CLK_M;
2346 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2347 if (err)
2348 return err;
2349
2350 if (soft_reset) {
2351 val |= P_REG_PS_SFT_RESET_M;
2352 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2353 if (err)
2354 return err;
2355 }
2356
2357 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2358
2359 return 0;
2360 }
2361
2362 /**
2363 * ice_start_phy_timer_e822 - Start the PHY clock timer
2364 * @hw: pointer to the HW struct
2365 * @port: the PHY port to start
2366 * @bypass: if true, start the PHY in bypass mode
2367 *
2368 * Start the clock of a PHY port. This must be done as part of the flow to
2369 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2370 * initialized or when link speed changes.
2371 *
2372 * Bypass mode enables timestamps immediately without waiting for Vernier
2373 * calibration to complete. Hardware will still continue taking Vernier
2374 * measurements on Tx or Rx of packets, but they will not be applied to
2375 * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware
2376 * has completed offset calculation.
2377 */
2378 int
2379 ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
2380 {
2381 u32 lo, hi, val;
2382 u64 incval;
2383 u8 tmr_idx;
2384 int err;
2385
2386 tmr_idx = ice_get_ptp_src_clock_index(hw);
2387
2388 err = ice_stop_phy_timer_e822(hw, port, false);
2389 if (err)
2390 return err;
2391
2392 ice_phy_cfg_lane_e822(hw, port);
2393
2394 err = ice_phy_cfg_uix_e822(hw, port);
2395 if (err)
2396 return err;
2397
2398 err = ice_phy_cfg_parpcs_e822(hw, port);
2399 if (err)
2400 return err;
2401
2402 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2403 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2404 incval = (u64)hi << 32 | lo;
2405
2406 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
2407 if (err)
2408 return err;
2409
2410 err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2411 if (err)
2412 return err;
2413
2414 /* Do not perform any action on the main timer */
2415 ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2416
2417 ice_ptp_exec_tmr_cmd(hw);
2418
2419 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2420 if (err)
2421 return err;
2422
2423 val |= P_REG_PS_SFT_RESET_M;
2424 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2425 if (err)
2426 return err;
2427
2428 val |= P_REG_PS_START_M;
2429 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2430 if (err)
2431 return err;
2432
2433 val &= ~P_REG_PS_SFT_RESET_M;
2434 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2435 if (err)
2436 return err;
2437
2438 err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2439 if (err)
2440 return err;
2441
2442 ice_ptp_exec_tmr_cmd(hw);
2443
2444 val |= P_REG_PS_ENA_CLK_M;
2445 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2446 if (err)
2447 return err;
2448
2449 val |= P_REG_PS_LOAD_OFFSET_M;
2450 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2451 if (err)
2452 return err;
2453
2454 ice_ptp_exec_tmr_cmd(hw);
2455
2456 err = ice_sync_phy_timer_e822(hw, port);
2457 if (err)
2458 return err;
2459
2460 if (bypass) {
2461 val |= P_REG_PS_BYPASS_MODE_M;
2462 /* Enter BYPASS mode, enabling timestamps immediately. */
2463 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2464 if (err)
2465 return err;
2466
2467 /* Program the fixed Tx offset */
2468 err = ice_phy_cfg_fixed_tx_offset_e822(hw, port);
2469 if (err)
2470 return err;
2471
2472 /* Program the fixed Rx offset */
2473 err = ice_phy_cfg_fixed_rx_offset_e822(hw, port);
2474 if (err)
2475 return err;
2476 }
2477
2478 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2479
2480 return 0;
2481 }
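
/* A minimal sketch of how a caller might sequence these helpers when
 * bringing a port up. The ordering follows the flow described above;
 * the surrounding error handling is illustrative only:
 *
 *	err = ice_start_phy_timer_e822(hw, port, true);
 *	if (err)
 *		return err;
 *
 *	(later, once at least one packet has been sent and received and
 *	 the OV status bits report valid Vernier offsets)
 *
 *	err = ice_phy_exit_bypass_e822(hw, port);
 *
 * Starting in bypass mode enables timestamps immediately using only the
 * fixed offsets; exiting bypass swaps in the more precise Vernier-based
 * totals once hardware has measured them.
 */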
2482
2483 /**
2484 * ice_phy_exit_bypass_e822 - Exit bypass mode, after vernier calculations
2485 * @hw: pointer to the HW struct
2486 * @port: the PHY port to configure
2487 *
2488 * After hardware finishes vernier calculations for the Tx and Rx offset, this
2489 * function can be used to exit bypass mode by updating the total Tx and Rx
2490 * offsets, and then disabling bypass. This will enable hardware to include
2491 * the more precise offset calibrations, increasing precision of the generated
2492 * timestamps.
2493 *
2494 * This cannot be done until hardware has measured the offsets, which requires
2495 * waiting until at least one packet has been sent and received by the device.
2496 */
2497 int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
2498 {
2499 int err;
2500 u32 val;
2501
2502 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
2503 if (err) {
2504 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
2505 port, err);
2506 return err;
2507 }
2508
2509 if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
2510 ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
2511 port);
2512 return -EBUSY;
2513 }
2514
2515 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
2516 if (err) {
2517 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2518 port, err);
2519 return err;
2520 }
2521
2522 	if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
2523 ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
2524 port);
2525 return -EBUSY;
2526 }
2527
2528 err = ice_phy_cfg_tx_offset_e822(hw, port);
2529 if (err) {
2530 ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
2531 port, err);
2532 return err;
2533 }
2534
2535 err = ice_phy_cfg_rx_offset_e822(hw, port);
2536 if (err) {
2537 ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
2538 port, err);
2539 return err;
2540 }
2541
2542 /* Exit bypass mode now that the offset has been updated */
2543 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2544 if (err) {
2545 ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
2546 port, err);
2547 return err;
2548 }
2549
2550 if (!(val & P_REG_PS_BYPASS_MODE_M))
2551 ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
2552 port);
2553
2554 val &= ~P_REG_PS_BYPASS_MODE_M;
2555 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2556 if (err) {
2557 ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
2558 port, err);
2559 return err;
2560 }
2561
2562 dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
2563 port);
2564
2565 return 0;
2566 }
2567
2568 /* E810 functions
2569 *
2570 * The following functions operate on the E810 series devices which use
2571 * a separate external PHY.
2572 */
2573
2574 /**
2575 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2576 * @hw: pointer to the HW struct
2577 * @addr: the address to read from
2578 * @val: On return, the value read from the PHY
2579 *
2580 * Read a register from the external PHY on the E810 device.
2581 */
2582 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2583 {
2584 struct ice_sbq_msg_input msg = {0};
2585 int err;
2586
2587 msg.msg_addr_low = lower_16_bits(addr);
2588 msg.msg_addr_high = upper_16_bits(addr);
2589 msg.opcode = ice_sbq_msg_rd;
2590 msg.dest_dev = rmn_0;
2591
2592 err = ice_sbq_rw_reg(hw, &msg);
2593 if (err) {
2594 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2595 err);
2596 return err;
2597 }
2598
2599 *val = msg.data;
2600
2601 return 0;
2602 }
2603
2604 /**
2605 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2606 * @hw: pointer to the HW struct
2607  * @addr: the address to write to
2608 * @val: the value to write to the PHY
2609 *
2610 * Write a value to a register of the external PHY on the E810 device.
2611 */
2612 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2613 {
2614 struct ice_sbq_msg_input msg = {0};
2615 int err;
2616
2617 msg.msg_addr_low = lower_16_bits(addr);
2618 msg.msg_addr_high = upper_16_bits(addr);
2619 msg.opcode = ice_sbq_msg_wr;
2620 msg.dest_dev = rmn_0;
2621 msg.data = val;
2622
2623 err = ice_sbq_rw_reg(hw, &msg);
2624 if (err) {
2625 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2626 err);
2627 return err;
2628 }
2629
2630 return 0;
2631 }
2632
2633 /**
2634  * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2635 * @hw: pointer to the HW struct
2636 * @idx: the timestamp index to read
2637 * @hi: 8 bit timestamp high value
2638 * @lo: 32 bit timestamp low value
2639 *
2640  * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of the
2641 * timestamp block of the external PHY on the E810 device using the low latency
2642 * timestamp read.
2643 */
2644 static int
2645 ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2646 {
2647 u32 val;
2648 u8 i;
2649
2650 /* Write TS index to read to the PF register so the FW can read it */
2651 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2652 wr32(hw, PF_SB_ATQBAL, val);
2653
2654 /* Read the register repeatedly until the FW provides us the TS */
2655 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2656 val = rd32(hw, PF_SB_ATQBAL);
2657
2658 /* When the bit is cleared, the TS is ready in the register */
2659 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2660 /* High 8 bit value of the TS is on the bits 16:23 */
2661 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2662
2663 /* Read the low 32 bit value and set the TS valid bit */
2664 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2665 return 0;
2666 }
2667
2668 udelay(10);
2669 }
2670
2671 /* FW failed to provide the TS in time */
2672 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2673 return -EINVAL;
2674 }
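
/* Sketch of the low latency handshake above with a concrete index
 * (the index is illustrative only): for idx = 5 the driver writes
 * FIELD_PREP(TS_LL_READ_TS_IDX, 5) | TS_LL_READ_TS to PF_SB_ATQBAL and
 * then polls the same register. Once firmware clears TS_LL_READ_TS,
 * bits 16:23 of PF_SB_ATQBAL hold the upper 8 bits of the timestamp and
 * PF_SB_ATQBAH holds the lower 32 bits.
 */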
2675
2676 /**
2677  * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
2678 * @hw: pointer to the HW struct
2679 * @lport: the lport to read from
2680 * @idx: the timestamp index to read
2681 * @hi: 8 bit timestamp high value
2682 * @lo: 32 bit timestamp low value
2683 *
2684  * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of the
2685 * timestamp block of the external PHY on the E810 device using sideband queue.
2686 */
2687 static int
2688 ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2689 u32 *lo)
2690 {
2691 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2692 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2693 u32 lo_val, hi_val;
2694 int err;
2695
2696 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2697 if (err) {
2698 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2699 err);
2700 return err;
2701 }
2702
2703 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2704 if (err) {
2705 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2706 err);
2707 return err;
2708 }
2709
2710 *lo = lo_val;
2711 *hi = (u8)hi_val;
2712
2713 return 0;
2714 }
2715
2716 /**
2717 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2718 * @hw: pointer to the HW struct
2719 * @lport: the lport to read from
2720 * @idx: the timestamp index to read
2721 * @tstamp: on return, the 40bit timestamp value
2722 *
2723 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2724 * on the E810 device.
2725 */
2726 static int
2727 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2728 {
2729 u32 lo = 0;
2730 u8 hi = 0;
2731 int err;
2732
2733 if (hw->dev_caps.ts_dev_info.ts_ll_read)
2734 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2735 else
2736 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2737
2738 if (err)
2739 return err;
2740
2741 /* For E810 devices, the timestamp is reported with the lower 32 bits
2742 * in the low register, and the upper 8 bits in the high register.
2743 */
2744 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
2745
2746 return 0;
2747 }
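
/* Worked example of the 40 bit composition above (values are
 * illustrative, and assume TS_HIGH_S shifts the upper byte above the
 * 32 bit low word): hi = 0x12 and lo = 0x89ABCDEF combine into
 * tstamp = 0x1289ABCDEF.
 */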
2748
2749 /**
2750 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2751 * @hw: pointer to the HW struct
2752 * @lport: the lport to read from
2753 * @idx: the timestamp index to reset
2754 *
2755 * Clear a timestamp, resetting its valid bit, from the timestamp block of the
2756 * external PHY on the E810 device.
2757 */
2758 static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2759 {
2760 u32 lo_addr, hi_addr;
2761 int err;
2762
2763 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2764 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2765
2766 err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2767 if (err) {
2768 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
2769 err);
2770 return err;
2771 }
2772
2773 err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2774 if (err) {
2775 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
2776 err);
2777 return err;
2778 }
2779
2780 return 0;
2781 }
2782
2783 /**
2784 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2785 * @hw: pointer to HW struct
2786 *
2787 * Enable the timesync PTP functionality for the external PHY connected to
2788 * this function.
2789 */
2790 int ice_ptp_init_phy_e810(struct ice_hw *hw)
2791 {
2792 u8 tmr_idx;
2793 int err;
2794
2795 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2796 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2797 GLTSYN_ENA_TSYN_ENA_M);
2798 if (err)
2799 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2800 err);
2801
2802 return err;
2803 }
2804
2805 /**
2806 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2807 * @hw: pointer to HW struct
2808 *
2809 * Perform E810-specific PTP hardware clock initialization steps.
2810 */
2811 static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2812 {
2813 /* Ensure synchronization delay is zero */
2814 wr32(hw, GLTSYN_SYNC_DLAY, 0);
2815
2816 /* Initialize the PHY */
2817 return ice_ptp_init_phy_e810(hw);
2818 }
2819
2820 /**
2821 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2822 * @hw: Board private structure
2823 * @time: Time to initialize the PHY port clock to
2824 *
2825  * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting the
2826 * initial clock time. The time will not actually be programmed until the
2827 * driver issues an INIT_TIME command.
2828 *
2829 * The time value is the upper 32 bits of the PHY timer, usually in units of
2830 * nominal nanoseconds.
2831 */
2832 static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2833 {
2834 u8 tmr_idx;
2835 int err;
2836
2837 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2838 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2839 if (err) {
2840 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2841 err);
2842 return err;
2843 }
2844
2845 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2846 if (err) {
2847 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2848 err);
2849 return err;
2850 }
2851
2852 return 0;
2853 }
2854
2855 /**
2856 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2857 * @hw: pointer to HW struct
2858 * @adj: adjustment value to program
2859 *
2860 * Prepare the PHY port for an atomic adjustment by programming the PHY
2861 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2862 * is completed by issuing an ADJ_TIME sync command.
2863 *
2864 * The adjustment value only contains the portion used for the upper 32bits of
2865 * the PHY timer, usually in units of nominal nanoseconds. Negative
2866 * adjustments are supported using 2s complement arithmetic.
2867 */
2868 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2869 {
2870 u8 tmr_idx;
2871 int err;
2872
2873 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2874
2875 /* Adjustments are represented as signed 2's complement values in
2876 * nanoseconds. Sub-nanosecond adjustment is not supported.
2877 */
2878 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2879 if (err) {
2880 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2881 err);
2882 return err;
2883 }
2884
2885 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2886 if (err) {
2887 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2888 err);
2889 return err;
2890 }
2891
2892 return 0;
2893 }
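
/* Example of a negative adjustment with the 2s complement encoding
 * mentioned above (the value is illustrative): for adj = -500, the value
 * written to ETH_GLTSYN_SHADJ_H is 0xFFFFFE0C, which hardware applies as
 * -500 ns once the ADJ_TIME command executes.
 */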
2894
2895 /**
2896 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2897 * @hw: pointer to HW struct
2898 * @incval: The new 40bit increment value to prepare
2899 *
2900 * Prepare the PHY port for a new increment value by programming the PHY
2901 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2902 * completed by issuing an INIT_INCVAL command.
2903 */
2904 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2905 {
2906 u32 high, low;
2907 u8 tmr_idx;
2908 int err;
2909
2910 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2911 low = lower_32_bits(incval);
2912 high = upper_32_bits(incval);
2913
2914 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2915 if (err) {
2916 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2917 err);
2918 return err;
2919 }
2920
2921 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
2922 if (err) {
2923 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
2924 err);
2925 return err;
2926 }
2927
2928 return 0;
2929 }
2930
2931 /**
2932 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
2933 * @hw: pointer to HW struct
2934 * @cmd: Command to be sent to the port
2935 *
2936 * Prepare the external PHYs connected to this device for a timer sync
2937 * command.
2938 */
2939 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2940 {
2941 u32 cmd_val, val;
2942 int err;
2943
2944 switch (cmd) {
2945 case INIT_TIME:
2946 cmd_val = GLTSYN_CMD_INIT_TIME;
2947 break;
2948 case INIT_INCVAL:
2949 cmd_val = GLTSYN_CMD_INIT_INCVAL;
2950 break;
2951 case ADJ_TIME:
2952 cmd_val = GLTSYN_CMD_ADJ_TIME;
2953 break;
2954 case READ_TIME:
2955 cmd_val = GLTSYN_CMD_READ_TIME;
2956 break;
2957 case ADJ_TIME_AT_TIME:
2958 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
2959 break;
2960 case ICE_PTP_NOP:
2961 return 0;
2962 }
2963
2964 /* Read, modify, write */
2965 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
2966 if (err) {
2967 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
2968 return err;
2969 }
2970
2971 /* Modify necessary bits only and perform write */
2972 val &= ~TS_CMD_MASK_E810;
2973 val |= cmd_val;
2974
2975 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
2976 if (err) {
2977 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
2978 return err;
2979 }
2980
2981 return 0;
2982 }
2983
2984 /* Device agnostic functions
2985 *
2986 * The following functions implement shared behavior common to both E822 and
2987 * E810 devices, possibly calling a device specific implementation where
2988 * necessary.
2989 */
2990
2991 /**
2992 * ice_ptp_lock - Acquire PTP global semaphore register lock
2993 * @hw: pointer to the HW struct
2994 *
2995 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
2996 * was acquired, false otherwise.
2997 *
2998 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
2999 * value. If software sees the busy bit cleared, this means that this function
3000 * acquired the lock (and the busy bit is now set). If software sees the busy
3001 * bit set, it means that another function acquired the lock.
3002 *
3003 * Software must clear the busy bit with a write to release the lock for other
3004 * functions when done.
3005 */
3006 bool ice_ptp_lock(struct ice_hw *hw)
3007 {
3008 u32 hw_lock;
3009 int i;
3010
3011 #define MAX_TRIES 5
3012
3013 for (i = 0; i < MAX_TRIES; i++) {
3014 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
3015 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
3016 if (!hw_lock)
3017 break;
3018
3019 /* Somebody is holding the lock */
3020 usleep_range(10000, 20000);
3021 }
3022
3023 return !hw_lock;
3024 }
3025
3026 /**
3027 * ice_ptp_unlock - Release PTP global semaphore register lock
3028 * @hw: pointer to the HW struct
3029 *
3030 * Release the global PTP hardware semaphore lock. This is done by writing to
3031 * the PFTSYN_SEM register.
3032 */
3033 void ice_ptp_unlock(struct ice_hw *hw)
3034 {
3035 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
3036 }
3037
3038 /**
3039 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
3040 * @hw: pointer to HW struct
3041 * @cmd: the command to issue
3042 *
3043 * Prepare the source timer and PHY timers and then trigger the requested
3044 * command. This causes the shadow registers previously written in preparation
3045 * for the command to be synchronously applied to both the source and PHY
3046 * timers.
3047 */
3048 static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3049 {
3050 int err;
3051
3052 /* First, prepare the source timer */
3053 ice_ptp_src_cmd(hw, cmd);
3054
3055 /* Next, prepare the ports */
3056 if (ice_is_e810(hw))
3057 err = ice_ptp_port_cmd_e810(hw, cmd);
3058 else
3059 err = ice_ptp_port_cmd_e822(hw, cmd);
3060 if (err) {
3061 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
3062 cmd, err);
3063 return err;
3064 }
3065
3066 /* Write the sync command register to drive both source and PHY timer
3067 * commands synchronously
3068 */
3069 ice_ptp_exec_tmr_cmd(hw);
3070
3071 return 0;
3072 }
3073
3074 /**
3075 * ice_ptp_init_time - Initialize device time to provided value
3076 * @hw: pointer to HW struct
3077 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
3078 *
3079  * Initialize the device clock to the specified time. This requires a
3080  * three-step process:
3081 *
3082 * 1) write the new init time to the source timer shadow registers
3083 * 2) write the new init time to the PHY timer shadow registers
3084 * 3) issue an init_time timer command to synchronously switch both the source
3085 * and port timers to the new init time value at the next clock cycle.
3086 */
3087 int ice_ptp_init_time(struct ice_hw *hw, u64 time)
3088 {
3089 u8 tmr_idx;
3090 int err;
3091
3092 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3093
3094 /* Source timers */
3095 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
3096 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
3097 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
3098
3099 /* PHY timers */
3100 /* Fill Rx and Tx ports and send msg to PHY */
3101 if (ice_is_e810(hw))
3102 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
3103 else
3104 err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
3105 if (err)
3106 return err;
3107
3108 return ice_ptp_tmr_cmd(hw, INIT_TIME);
3109 }
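
/* A minimal usage sketch (the caller shown is an assumption; the actual
 * callers live outside this file): initializing the clock from the
 * current kernel time might look like
 *
 *	struct timespec64 ts;
 *
 *	ktime_get_real_ts64(&ts);
 *	err = ice_ptp_init_time(hw, timespec64_to_ns(&ts));
 *
 * The 64 bit argument lands in GLTSYN_TIME_L/GLTSYN_TIME_H, while only
 * the lower 32 bits are forwarded to the PHY shadow registers.
 */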
3110
3111 /**
3112 * ice_ptp_write_incval - Program PHC with new increment value
3113 * @hw: pointer to HW struct
3114 * @incval: Source timer increment value per clock cycle
3115 *
3116 * Program the PHC with a new increment value. This requires a three-step
3117 * process:
3118 *
3119 * 1) Write the increment value to the source timer shadow registers
3120 * 2) Write the increment value to the PHY timer shadow registers
3121 * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
3122 * source and port timers to the new increment value at the next clock
3123 * cycle.
3124 */
3125 int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3126 {
3127 u8 tmr_idx;
3128 int err;
3129
3130 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3131
3132 /* Shadow Adjust */
3133 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3134 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3135
3136 if (ice_is_e810(hw))
3137 err = ice_ptp_prep_phy_incval_e810(hw, incval);
3138 else
3139 err = ice_ptp_prep_phy_incval_e822(hw, incval);
3140 if (err)
3141 return err;
3142
3143 return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
3144 }
3145
3146 /**
3147 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3148 * @hw: pointer to HW struct
3149 * @incval: Source timer increment value per clock cycle
3150 *
3151 * Program a new PHC incval while holding the PTP semaphore.
3152 */
3153 int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3154 {
3155 int err;
3156
3157 if (!ice_ptp_lock(hw))
3158 return -EBUSY;
3159
3160 err = ice_ptp_write_incval(hw, incval);
3161
3162 ice_ptp_unlock(hw);
3163
3164 return err;
3165 }
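
/* Sketch of how the increment value relates to clock rate (the arithmetic
 * is illustrative; the real frequency adjustment path lives in the upper
 * PTP layer): the timer advances by incval TUs per source clock tick, so
 * scaling incval scales the clock rate proportionally. To run 10 ppm fast
 * relative to a nominal value, a caller could program
 *
 *	u64 incval = nominal_incval +
 *		     div_u64(nominal_incval * 10, 1000000);
 *
 *	err = ice_ptp_write_incval_locked(hw, incval);
 *
 * where nominal_incval is a placeholder for whichever base increment
 * matches the current clock source frequency.
 */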
3166
3167 /**
3168 * ice_ptp_adj_clock - Adjust PHC clock time atomically
3169 * @hw: pointer to HW struct
3170 * @adj: Adjustment in nanoseconds
3171 *
3172 * Perform an atomic adjustment of the PHC time by the specified number of
3173 * nanoseconds. This requires a three-step process:
3174 *
3175 * 1) Write the adjustment to the source timer shadow registers
3176 * 2) Write the adjustment to the PHY timer shadow registers
3177 * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
3178 * both the source and port timers at the next clock cycle.
3179 */
3180 int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3181 {
3182 u8 tmr_idx;
3183 int err;
3184
3185 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3186
3187 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3188 * For an ADJ_TIME command, this set of registers represents the value
3189 * to add to the clock time. It supports subtraction by interpreting
3190 * the value as a 2's complement integer.
3191 */
3192 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3193 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3194
3195 if (ice_is_e810(hw))
3196 err = ice_ptp_prep_phy_adj_e810(hw, adj);
3197 else
3198 err = ice_ptp_prep_phy_adj_e822(hw, adj);
3199 if (err)
3200 return err;
3201
3202 return ice_ptp_tmr_cmd(hw, ADJ_TIME);
3203 }
3204
3205 /**
3206  * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3207 * @hw: pointer to the HW struct
3208 * @block: the block to read from
3209 * @idx: the timestamp index to read
3210 * @tstamp: on return, the 40bit timestamp value
3211 *
3212 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3213 * the block is the quad to read from. For E810 devices, the block is the
3214 * logical port to read from.
3215 */
3216 int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3217 {
3218 if (ice_is_e810(hw))
3219 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3220 else
3221 return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
3222 }
3223
3224 /**
3225 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3226 * @hw: pointer to the HW struct
3227 * @block: the block to read from
3228 * @idx: the timestamp index to reset
3229 *
3230 * Clear a timestamp, resetting its valid bit, from the timestamp block. For
3231 * E822 devices, the block is the quad to clear from. For E810 devices, the
3232 * block is the logical port to clear from.
3233 */
3234 int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3235 {
3236 if (ice_is_e810(hw))
3237 return ice_clear_phy_tstamp_e810(hw, block, idx);
3238 else
3239 return ice_clear_phy_tstamp_e822(hw, block, idx);
3240 }
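
/* Minimal sketch of the read-then-clear pattern these wrappers support.
 * The block and idx values and the valid-bit check are illustrative;
 * TS_VALID here refers to the bit 0 valid flag used by the E810 low
 * latency read above:
 *
 *	u64 tstamp;
 *
 *	err = ice_read_phy_tstamp(hw, block, idx, &tstamp);
 *	if (!err && (tstamp & TS_VALID)) {
 *		(hand tstamp to the timestamping core)
 *		err = ice_clear_phy_tstamp(hw, block, idx);
 *	}
 *
 * Clearing resets the valid bit so the index can be reused for a later
 * packet.
 */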
3241
3242 /* E810T SMA functions
3243 *
3244 * The following functions operate specifically on E810T hardware and are used
3245 * to access the extended GPIOs available.
3246 */
3247
3248 /**
3249 * ice_get_pca9575_handle
3250 * @hw: pointer to the hw struct
3251 * @pca9575_handle: GPIO controller's handle
3252 *
3253 * Find and return the GPIO controller's handle in the netlist.
3254  * Once found, the handle is cached in the HW structure and subsequent calls
3255  * return the cached value.
3256 */
3257 static int
3258 ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3259 {
3260 struct ice_aqc_get_link_topo *cmd;
3261 struct ice_aq_desc desc;
3262 int status;
3263 u8 idx;
3264
3265 /* If handle was read previously return cached value */
3266 if (hw->io_expander_handle) {
3267 *pca9575_handle = hw->io_expander_handle;
3268 return 0;
3269 }
3270
3271 /* If handle was not detected read it from the netlist */
3272 cmd = &desc.params.get_link_topo;
3273 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3274
3275 /* Set node type to GPIO controller */
3276 cmd->addr.topo_params.node_type_ctx =
3277 (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3278 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3279
3280 #define SW_PCA9575_SFP_TOPO_IDX 2
3281 #define SW_PCA9575_QSFP_TOPO_IDX 1
3282
3283 /* Check if the SW IO expander controlling SMA exists in the netlist. */
3284 if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3285 idx = SW_PCA9575_SFP_TOPO_IDX;
3286 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3287 idx = SW_PCA9575_QSFP_TOPO_IDX;
3288 else
3289 return -EOPNOTSUPP;
3290
3291 cmd->addr.topo_params.index = idx;
3292
3293 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3294 if (status)
3295 return -EOPNOTSUPP;
3296
3297 /* Verify if we found the right IO expander type */
3298 if (desc.params.get_link_topo.node_part_num !=
3299 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3300 return -EOPNOTSUPP;
3301
3302 /* If present save the handle and return it */
3303 hw->io_expander_handle =
3304 le16_to_cpu(desc.params.get_link_topo.addr.handle);
3305 *pca9575_handle = hw->io_expander_handle;
3306
3307 return 0;
3308 }
3309
3310 /**
3311 * ice_read_sma_ctrl_e810t
3312 * @hw: pointer to the hw struct
3313 * @data: pointer to data to be read from the GPIO controller
3314 *
3315 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3316 * PCA9575 expander, so only bits 3-7 in data are valid.
3317 */
3318 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3319 {
3320 int status;
3321 u16 handle;
3322 u8 i;
3323
3324 status = ice_get_pca9575_handle(hw, &handle);
3325 if (status)
3326 return status;
3327
3328 *data = 0;
3329
3330 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3331 bool pin;
3332
3333 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3334 &pin, NULL);
3335 if (status)
3336 break;
3337 *data |= (u8)(!pin) << i;
3338 }
3339
3340 return status;
3341 }
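
/* A short read-modify-write sketch using the two SMA helpers (the bit
 * chosen is illustrative only):
 *
 *	u8 sma;
 *
 *	err = ice_read_sma_ctrl_e810t(hw, &sma);
 *	if (err)
 *		return err;
 *
 *	sma |= BIT(ICE_SMA_MIN_BIT_E810T);
 *	err = ice_write_sma_ctrl_e810t(hw, sma);
 *
 * Only bits 3-7 are meaningful, matching the Port 1 pins of the PCA9575
 * described above.
 */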
3342
3343 /**
3344 * ice_write_sma_ctrl_e810t
3345 * @hw: pointer to the hw struct
3346 * @data: data to be written to the GPIO controller
3347 *
3348 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3349 * of the PCA9575 expander, so only bits 3-7 in data are valid.
3350 */
3351 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3352 {
3353 int status;
3354 u16 handle;
3355 u8 i;
3356
3357 status = ice_get_pca9575_handle(hw, &handle);
3358 if (status)
3359 return status;
3360
3361 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3362 bool pin;
3363
3364 pin = !(data & (1 << i));
3365 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3366 pin, NULL);
3367 if (status)
3368 break;
3369 }
3370
3371 return status;
3372 }
3373
3374 /**
3375 * ice_read_pca9575_reg_e810t
3376 * @hw: pointer to the hw struct
3377 * @offset: GPIO controller register offset
3378 * @data: pointer to data to be read from the GPIO controller
3379 *
3380 * Read the register from the GPIO controller
3381 */
3382 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3383 {
3384 struct ice_aqc_link_topo_addr link_topo;
3385 __le16 addr;
3386 u16 handle;
3387 int err;
3388
3389 memset(&link_topo, 0, sizeof(link_topo));
3390
3391 err = ice_get_pca9575_handle(hw, &handle);
3392 if (err)
3393 return err;
3394
3395 link_topo.handle = cpu_to_le16(handle);
3396 link_topo.topo_params.node_type_ctx =
3397 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3398 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3399
3400 addr = cpu_to_le16((u16)offset);
3401
3402 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3403 }
3404
3405 /**
3406 * ice_is_pca9575_present
3407 * @hw: pointer to the hw struct
3408 *
3409 * Check if the SW IO expander is present in the netlist
3410 */
3411 bool ice_is_pca9575_present(struct ice_hw *hw)
3412 {
3413 u16 handle = 0;
3414 int status;
3415
3416 if (!ice_is_e810t(hw))
3417 return false;
3418
3419 status = ice_get_pca9575_handle(hw, &handle);
3420
3421 return !status && handle;
3422 }
3423
3424 /**
3425 * ice_ptp_init_phc - Initialize PTP hardware clock
3426 * @hw: pointer to the HW struct
3427 *
3428 * Perform the steps required to initialize the PTP hardware clock.
3429 */
3430 int ice_ptp_init_phc(struct ice_hw *hw)
3431 {
3432 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3433
3434 /* Enable source clocks */
3435 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3436
3437 /* Clear event err indications for auxiliary pins */
3438 (void)rd32(hw, GLTSYN_STAT(src_idx));
3439
3440 if (ice_is_e810(hw))
3441 return ice_ptp_init_phc_e810(hw);
3442 else
3443 return ice_ptp_init_phc_e822(hw);
3444 }
3445