1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
8 #define E810_OUT_PROP_DELAY_NS 1
9 
10 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
11 	/* name    idx   func         chan */
12 	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
13 	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
14 	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
15 	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
16 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
17 };
18 
19 static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
20 {
21 	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
22 }
23 
24 static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
25 {
26 	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
27 
28 	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
29 }
30 
31 /**
32  * ice_get_sma_config_e810t
33  * @hw: pointer to the hw struct
34  * @ptp_pins: pointer to the ptp_pin_desc structure
35  *
36  * Read the configuration of the SMA control logic and put it into the
37  * ptp_pin_desc structure
38  */
39 static int
40 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
41 {
42 	u8 data, i;
43 	int status;
44 
45 	/* Read initial pin state */
46 	status = ice_read_sma_ctrl_e810t(hw, &data);
47 	if (status)
48 		return status;
49 
50 	/* initialize with defaults */
51 	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
52 		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
53 			sizeof(ptp_pins[i].name));
54 		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
55 		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
56 		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
57 	}
58 
59 	/* Parse SMA1/UFL1 */
60 	switch (data & ICE_SMA1_MASK_E810T) {
61 	case ICE_SMA1_MASK_E810T:
62 	default:
63 		ptp_pins[SMA1].func = PTP_PF_NONE;
64 		ptp_pins[UFL1].func = PTP_PF_NONE;
65 		break;
66 	case ICE_SMA1_DIR_EN_E810T:
67 		ptp_pins[SMA1].func = PTP_PF_PEROUT;
68 		ptp_pins[UFL1].func = PTP_PF_NONE;
69 		break;
70 	case ICE_SMA1_TX_EN_E810T:
71 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
72 		ptp_pins[UFL1].func = PTP_PF_NONE;
73 		break;
74 	case 0:
75 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
76 		ptp_pins[UFL1].func = PTP_PF_PEROUT;
77 		break;
78 	}
79 
80 	/* Parse SMA2/UFL2 */
81 	switch (data & ICE_SMA2_MASK_E810T) {
82 	case ICE_SMA2_MASK_E810T:
83 	default:
84 		ptp_pins[SMA2].func = PTP_PF_NONE;
85 		ptp_pins[UFL2].func = PTP_PF_NONE;
86 		break;
87 	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
88 		ptp_pins[SMA2].func = PTP_PF_EXTTS;
89 		ptp_pins[UFL2].func = PTP_PF_NONE;
90 		break;
91 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
92 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
93 		ptp_pins[UFL2].func = PTP_PF_NONE;
94 		break;
95 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
96 		ptp_pins[SMA2].func = PTP_PF_NONE;
97 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
98 		break;
99 	case ICE_SMA2_DIR_EN_E810T:
100 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
101 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
102 		break;
103 	}
104 
105 	return 0;
106 }
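
/* Summary of the SMA1/U.FL1 decode implemented by the switch above (derived
 * from those cases; EXTTS acts as an RX input, PEROUT as a TX output):
 *
 *   data & ICE_SMA1_MASK_E810T    SMA1      U.FL1
 *   --------------------------    --------  --------
 *   0                             EXTTS     PEROUT
 *   ICE_SMA1_DIR_EN_E810T         PEROUT    disabled
 *   ICE_SMA1_TX_EN_E810T          EXTTS     disabled
 *   both bits set (the mask)      disabled  disabled
 */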
107 
108 /**
109  * ice_ptp_set_sma_config_e810t
110  * @hw: pointer to the hw struct
111  * @ptp_pins: pointer to the ptp_pin_desc structure
112  *
113  * Set the configuration of the SMA control logic based on the configuration
114  * passed in the ptp_pins parameter
115  */
116 static int
117 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
118 			     const struct ptp_pin_desc *ptp_pins)
119 {
120 	int status;
121 	u8 data;
122 
123 	/* SMA1 and UFL1 cannot be set to TX at the same time */
124 	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
125 	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
126 		return -EINVAL;
127 
128 	/* SMA2 and UFL2 cannot be set to RX at the same time */
129 	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
130 	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
131 		return -EINVAL;
132 
133 	/* Read initial pin state value */
134 	status = ice_read_sma_ctrl_e810t(hw, &data);
135 	if (status)
136 		return status;
137 
138 	/* Set the right state based on the desired configuration */
139 	data &= ~ICE_SMA1_MASK_E810T;
140 	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
141 	    ptp_pins[UFL1].func == PTP_PF_NONE) {
142 		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
143 		data |= ICE_SMA1_MASK_E810T;
144 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
145 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
146 		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
147 		data |= ICE_SMA1_TX_EN_E810T;
148 	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
149 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
150 		/* U.FL 1 TX will always enable SMA 1 RX */
151 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
152 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
153 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
154 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
155 	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
156 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
157 		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
158 		data |= ICE_SMA1_DIR_EN_E810T;
159 	}
160 
161 	data &= ~ICE_SMA2_MASK_E810T;
162 	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
163 	    ptp_pins[UFL2].func == PTP_PF_NONE) {
164 		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
165 		data |= ICE_SMA2_MASK_E810T;
166 	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
167 			ptp_pins[UFL2].func == PTP_PF_NONE) {
168 		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
169 		data |= (ICE_SMA2_TX_EN_E810T |
170 			 ICE_SMA2_UFL2_RX_DIS_E810T);
171 	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
172 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
173 		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
174 		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
175 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
176 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
177 		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
178 		data |= (ICE_SMA2_DIR_EN_E810T |
179 			 ICE_SMA2_UFL2_RX_DIS_E810T);
180 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
181 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
182 		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
183 		data |= ICE_SMA2_DIR_EN_E810T;
184 	}
185 
186 	return ice_write_sma_ctrl_e810t(hw, data);
187 }
188 
189 /**
190  * ice_ptp_set_sma_e810t
191  * @info: the driver's PTP info structure
192  * @pin: pin index in kernel structure
193  * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
194  *
195  * Set the configuration of a single SMA pin
196  */
197 static int
198 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
199 		      enum ptp_pin_function func)
200 {
201 	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
202 	struct ice_pf *pf = ptp_info_to_pf(info);
203 	struct ice_hw *hw = &pf->hw;
204 	int err;
205 
206 	if (pin < SMA1 || func > PTP_PF_PEROUT)
207 		return -EOPNOTSUPP;
208 
209 	err = ice_get_sma_config_e810t(hw, ptp_pins);
210 	if (err)
211 		return err;
212 
213 	/* Disable the same function on the other pin sharing the channel */
214 	if (pin == SMA1 && ptp_pins[UFL1].func == func)
215 		ptp_pins[UFL1].func = PTP_PF_NONE;
216 	if (pin == UFL1 && ptp_pins[SMA1].func == func)
217 		ptp_pins[SMA1].func = PTP_PF_NONE;
218 
219 	if (pin == SMA2 && ptp_pins[UFL2].func == func)
220 		ptp_pins[UFL2].func = PTP_PF_NONE;
221 	if (pin == UFL2 && ptp_pins[SMA2].func == func)
222 		ptp_pins[SMA2].func = PTP_PF_NONE;
223 
224 	/* Set up new pin function in the temp table */
225 	ptp_pins[pin].func = func;
226 
227 	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
228 }
229 
230 /**
231  * ice_verify_pin_e810t
232  * @info: the driver's PTP info structure
233  * @pin: Pin index
234  * @func: Assigned function
235  * @chan: Assigned channel
236  *
237  * Verify that the pin supports the requested function and check pin
238  * consistency. Reconfigure the SMA logic attached to the given pin to
239  * enable its desired functionality.
240  */
241 static int
242 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
243 		     enum ptp_pin_function func, unsigned int chan)
244 {
245 	/* Don't allow channel reassignment */
246 	if (chan != ice_pin_desc_e810t[pin].chan)
247 		return -EOPNOTSUPP;
248 
249 	/* Check if functions are properly assigned */
250 	switch (func) {
251 	case PTP_PF_NONE:
252 		break;
253 	case PTP_PF_EXTTS:
254 		if (pin == UFL1)
255 			return -EOPNOTSUPP;
256 		break;
257 	case PTP_PF_PEROUT:
258 		if (pin == UFL2 || pin == GNSS)
259 			return -EOPNOTSUPP;
260 		break;
261 	case PTP_PF_PHYSYNC:
262 		return -EOPNOTSUPP;
263 	}
264 
265 	return ice_ptp_set_sma_e810t(info, pin, func);
266 }
267 
268 /**
269  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
270  * @pf: Board private structure
271  *
272  * Program the device to respond appropriately to the Tx timestamp interrupt
273  * cause.
274  */
275 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
276 {
277 	struct ice_hw *hw = &pf->hw;
278 	bool enable;
279 	u32 val;
280 
281 	switch (pf->ptp.tx_interrupt_mode) {
282 	case ICE_PTP_TX_INTERRUPT_ALL:
283 		/* React to interrupts across all quads. */
284 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
285 		enable = true;
286 		break;
287 	case ICE_PTP_TX_INTERRUPT_NONE:
288 		/* Do not react to interrupts on any quad. */
289 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
290 		enable = false;
291 		break;
292 	case ICE_PTP_TX_INTERRUPT_SELF:
293 	default:
294 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
295 		break;
296 	}
297 
298 	/* Configure the Tx timestamp interrupt */
299 	val = rd32(hw, PFINT_OICR_ENA);
300 	if (enable)
301 		val |= PFINT_OICR_TSYN_TX_M;
302 	else
303 		val &= ~PFINT_OICR_TSYN_TX_M;
304 	wr32(hw, PFINT_OICR_ENA, val);
305 }
306 
307 /**
308  * ice_set_rx_tstamp - Enable or disable Rx timestamping
309  * @pf: The PF pointer to search in
310  * @on: bool value for whether timestamps are enabled or disabled
311  */
312 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
313 {
314 	struct ice_vsi *vsi;
315 	u16 i;
316 
317 	vsi = ice_get_main_vsi(pf);
318 	if (!vsi || !vsi->rx_rings)
319 		return;
320 
321 	/* Set the timestamp flag for all the Rx rings */
322 	ice_for_each_rxq(vsi, i) {
323 		if (!vsi->rx_rings[i])
324 			continue;
325 		vsi->rx_rings[i]->ptp_rx = on;
326 	}
327 }
328 
329 /**
330  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
331  * @pf: Board private structure
332  *
333  * Called during preparation for reset to temporarily disable timestamping on
334  * the device. Called during remove to disable timestamping while cleaning up
335  * driver resources.
336  */
337 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
338 {
339 	struct ice_hw *hw = &pf->hw;
340 	u32 val;
341 
342 	val = rd32(hw, PFINT_OICR_ENA);
343 	val &= ~PFINT_OICR_TSYN_TX_M;
344 	wr32(hw, PFINT_OICR_ENA, val);
345 
346 	ice_set_rx_tstamp(pf, false);
347 }
348 
349 /**
350  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
351  * @pf: Board private structure
352  *
353  * Called at the end of rebuild to restore timestamp configuration after
354  * a device reset.
355  */
356 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
357 {
358 	struct ice_hw *hw = &pf->hw;
359 	bool enable_rx;
360 
361 	ice_ptp_cfg_tx_interrupt(pf);
362 
363 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
364 	ice_set_rx_tstamp(pf, enable_rx);
365 
366 	/* Trigger an immediate software interrupt to ensure that timestamps
367 	 * which occurred during reset are handled now.
368 	 */
369 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
370 	ice_flush(hw);
371 }
372 
373 /**
374  * ice_ptp_read_src_clk_reg - Read the source clock register
375  * @pf: Board private structure
376  * @sts: Optional parameter for holding a pair of system timestamps from
377  *       the system clock. Will be ignored if NULL is given.
378  */
379 static u64
380 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
381 {
382 	struct ice_hw *hw = &pf->hw;
383 	u32 hi, lo, lo2;
384 	u8 tmr_idx;
385 
386 	tmr_idx = ice_get_ptp_src_clock_index(hw);
387 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
388 	/* Read the system timestamp pre PHC read */
389 	ptp_read_system_prets(sts);
390 
391 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
392 
393 	/* Read the system timestamp post PHC read */
394 	ptp_read_system_postts(sts);
395 
396 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
397 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
398 
399 	if (lo2 < lo) {
400 		/* If TIME_L rolled over, read TIME_L again and update
401 		 * system timestamps
402 		 */
403 		ptp_read_system_prets(sts);
404 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
405 		ptp_read_system_postts(sts);
406 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
407 	}
408 
409 	return ((u64)hi << 32) | lo;
410 }
411 
412 /**
413  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
414  * @cached_phc_time: recently cached copy of PHC time
415  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
416  *
417  * Hardware captures timestamps which contain only 32 bits of nominal
418  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
419  * Note that the captured timestamp values may be 40 bits, but the lower
420  * 8 bits are sub-nanoseconds and generally discarded.
421  *
422  * Extend the 32bit nanosecond timestamp using the following algorithm and
423  * assumptions:
424  *
425  * 1) have a recently cached copy of the PHC time
426  * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
427  *    seconds) before or after the PHC time was captured.
428  * 3) calculate the delta between the cached time and the timestamp
429  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
430  *    captured after the PHC time. In this case, the full timestamp is just
431  *    the cached PHC time plus the delta.
432  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
433  *    timestamp was captured *before* the PHC time, i.e. because the PHC
434  *    cache was updated after the timestamp was captured by hardware. In this
435  *    case, the full timestamp is the cached time minus the inverse delta.
436  *
437  * This algorithm works even if the PHC time was updated after a Tx timestamp
438  * was requested, but before the Tx timestamp event was reported from
439  * hardware.
440  *
441  * This calculation primarily relies on keeping the cached PHC time up to
442  * date. If the timestamp was captured more than 2^31 nanoseconds after the
443  * PHC time, it is possible that the lower 32bits of PHC time have
444  * overflowed more than once, and we might generate an incorrect timestamp.
445  *
446  * This is prevented by (a) periodically updating the cached PHC time once
447  * a second, and (b) discarding any Tx timestamp packet if it has waited for
448  * a timestamp for more than one second.
449  */
450 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
451 {
452 	u32 delta, phc_time_lo;
453 	u64 ns;
454 
455 	/* Extract the lower 32 bits of the PHC time */
456 	phc_time_lo = (u32)cached_phc_time;
457 
458 	/* Calculate the delta between the lower 32bits of the cached PHC
459 	 * time and the in_tstamp value
460 	 */
461 	delta = (in_tstamp - phc_time_lo);
462 
463 	/* Do not assume that the in_tstamp is always more recent than the
464 	 * cached PHC time. If the delta is large, it indicates that the
465 	 * in_tstamp was taken in the past, and should be converted
466 	 * forward.
467 	 */
468 	if (delta > (U32_MAX / 2)) {
469 		/* reverse the delta calculation here */
470 		delta = (phc_time_lo - in_tstamp);
471 		ns = cached_phc_time - delta;
472 	} else {
473 		ns = cached_phc_time + delta;
474 	}
475 
476 	return ns;
477 }
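
/* Worked example of the extension above, using illustrative values only:
 * with cached_phc_time = 0x1FFFFFF00 (so phc_time_lo = 0xFFFFFF00) and
 * in_tstamp = 0x00000100, the u32 subtraction wraps to delta = 0x200,
 * which is below U32_MAX / 2, so the timestamp is treated as newer and
 * extends to 0x1FFFFFF00 + 0x200 = 0x200000100, correctly crossing the
 * 32-bit rollover. Conversely, in_tstamp = 0xFFFFFD00 gives delta =
 * 0xFFFFFE00 > U32_MAX / 2, so the delta is recomputed the other way as
 * 0x200 and the result is 0x1FFFFFF00 - 0x200 = 0x1FFFFFD00, i.e. a
 * timestamp captured just before the cached PHC time.
 */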
478 
479 /**
480  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
481  * @pf: Board private structure
482  * @in_tstamp: Ingress/egress 40b timestamp value
483  *
484  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
485  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
486  *
487  *  *--------------------------------------------------------------*
488  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
489  *  *--------------------------------------------------------------*
490  *
491  * The low bit is an indicator of whether the timestamp is valid. The next
492  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
493  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
494  *
495  * It is assumed that the caller verifies the timestamp is valid prior to
496  * calling this function.
497  *
498  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
499  * time stored in the device private PTP structure as the basis for timestamp
500  * extension.
501  *
502  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
503  * algorithm.
504  */
505 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
506 {
507 	const u64 mask = GENMASK_ULL(31, 0);
508 	unsigned long discard_time;
509 
510 	/* Discard the hardware timestamp if the cached PHC time is too old */
511 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
512 	if (time_is_before_jiffies(discard_time)) {
513 		pf->ptp.tx_hwtstamp_discarded++;
514 		return 0;
515 	}
516 
517 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
518 				     (in_tstamp >> 8) & mask);
519 }
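
/* Worked example with an illustrative value: a raw 40-bit timestamp of
 * 0x1234567801 has its low (valid) bit set per the layout above, bits 7:1
 * carry the sub-nanosecond capture, and (in_tstamp >> 8) & mask extracts
 * 0x12345678 as the nominal nanoseconds that are then handed to
 * ice_ptp_extend_32b_ts() together with the cached PHC time.
 */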
520 
521 /**
522  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
523  * @tx: the PTP Tx timestamp tracker to check
524  *
525  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
526  * to accept new timestamp requests.
527  *
528  * Assumes the tx->lock spinlock is already held.
529  */
530 static bool
531 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
532 {
533 	lockdep_assert_held(&tx->lock);
534 
535 	return tx->init && !tx->calibrating;
536 }
537 
538 /**
539  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
540  * @tx: the PTP Tx timestamp tracker
541  * @idx: index of the timestamp to request
542  */
543 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
544 {
545 	struct ice_ptp_port *ptp_port;
546 	struct sk_buff *skb;
547 	struct ice_pf *pf;
548 
549 	if (!tx->init)
550 		return;
551 
552 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
553 	pf = ptp_port_to_pf(ptp_port);
554 
555 	/* Drop packets which have waited for more than 2 seconds */
556 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
557 		/* Count the number of Tx timestamps that timed out */
558 		pf->ptp.tx_hwtstamp_timeouts++;
559 
560 		skb = tx->tstamps[idx].skb;
561 		tx->tstamps[idx].skb = NULL;
562 		clear_bit(idx, tx->in_use);
563 
564 		dev_kfree_skb_any(skb);
565 		return;
566 	}
567 
568 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
569 
570 	/* Write the index of the TS to read to the PF register so the FW can read it */
571 	wr32(&pf->hw, PF_SB_ATQBAL,
572 	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
573 	     TS_LL_READ_TS);
574 	tx->last_ll_ts_idx_read = idx;
575 }
576 
577 /**
578  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
579  * @tx: the PTP Tx timestamp tracker
580  */
581 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
582 {
583 	struct skb_shared_hwtstamps shhwtstamps = {};
584 	u8 idx = tx->last_ll_ts_idx_read;
585 	struct ice_ptp_port *ptp_port;
586 	u64 raw_tstamp, tstamp;
587 	bool drop_ts = false;
588 	struct sk_buff *skb;
589 	struct ice_pf *pf;
590 	u32 val;
591 
592 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
593 		return;
594 
595 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
596 	pf = ptp_port_to_pf(ptp_port);
597 
598 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
599 
600 	val = rd32(&pf->hw, PF_SB_ATQBAL);
601 
602 	/* When the bit is cleared, the TS is ready in the register */
603 	if (val & TS_LL_READ_TS) {
604 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
605 		return;
606 	}
607 
608 	/* The high 8 bits of the TS are in bits 16:23 */
609 	raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
610 	raw_tstamp <<= 32;
611 
612 	/* Read the low 32 bit value */
613 	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
614 
615 	/* Devices using this interface always verify that the timestamp
616 	 * differs from the last cached timestamp value.
617 	 */
618 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
619 		return;
620 
621 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
622 	clear_bit(idx, tx->in_use);
623 	skb = tx->tstamps[idx].skb;
624 	tx->tstamps[idx].skb = NULL;
625 	if (test_and_clear_bit(idx, tx->stale))
626 		drop_ts = true;
627 
628 	if (!skb)
629 		return;
630 
631 	if (drop_ts) {
632 		dev_kfree_skb_any(skb);
633 		return;
634 	}
635 
636 	/* Extend the timestamp using cached PHC time */
637 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
638 	if (tstamp) {
639 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
640 		ice_trace(tx_tstamp_complete, skb, idx);
641 	}
642 
643 	skb_tstamp_tx(skb, &shhwtstamps);
644 	dev_kfree_skb_any(skb);
645 }
646 
647 /**
648  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
649  * @tx: the PTP Tx timestamp tracker
650  *
651  * Process timestamps captured by the PHY associated with this port. To do
652  * this, loop over each index with a waiting skb.
653  *
654  * If a given index has a valid timestamp, perform the following steps:
655  *
656  * 1) check that the timestamp request is not stale
657  * 2) check that a timestamp is ready and available in the PHY memory bank
658  * 3) read and copy the timestamp out of the PHY register
659  * 4) unlock the index by clearing the associated in_use bit
660  * 5) check if the timestamp is stale, and discard if so
661  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
662  * 7) send this 64 bit timestamp to the stack
663  *
664  * Note that we do not hold the tracking lock while reading the Tx timestamp.
665  * This is because reading the timestamp requires taking a mutex that might
666  * sleep.
667  *
668  * The only place where we set in_use is when a new timestamp is initiated
669  * with a slot index. This is only called in the hard xmit routine where an
670  * SKB has a request flag set. The only places where we clear this bit is this
671  * function, or during teardown when the Tx timestamp tracker is being
672  * removed. A timestamp index will never be re-used until the in_use bit for
673  * that index is cleared.
674  *
675  * If a Tx thread starts a new timestamp, we might not begin processing it
676  * right away but we will notice it at the end when we re-queue the task.
677  *
678  * If a Tx thread starts a new timestamp just after this function exits, the
679  * interrupt for that timestamp should re-trigger this function once
680  * a timestamp is ready.
681  *
682  * In cases where the PTP hardware clock was directly adjusted, some
683  * timestamps may not be able to safely use the timestamp extension math. In
684  * this case, software will set the stale bit for any outstanding Tx
685  * timestamps when the clock is adjusted. Then this function will discard
686  * those captured timestamps instead of sending them to the stack.
687  *
688  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
689  * to correctly extend the timestamp using the cached PHC time. It is
690  * extremely unlikely that a packet will ever take this long to timestamp. If
691  * we detect a Tx timestamp request that has waited for this long we assume
692  * the packet will never be sent by hardware and discard it without reading
693  * the timestamp register.
694  */
695 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
696 {
697 	struct ice_ptp_port *ptp_port;
698 	unsigned long flags;
699 	struct ice_pf *pf;
700 	struct ice_hw *hw;
701 	u64 tstamp_ready;
702 	bool link_up;
703 	int err;
704 	u8 idx;
705 
706 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
707 	pf = ptp_port_to_pf(ptp_port);
708 	hw = &pf->hw;
709 
710 	/* Read the Tx ready status first */
711 	if (tx->has_ready_bitmap) {
712 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
713 		if (err)
714 			return;
715 	}
716 
717 	/* Drop packets if the link went down */
718 	link_up = ptp_port->link_up;
719 
720 	for_each_set_bit(idx, tx->in_use, tx->len) {
721 		struct skb_shared_hwtstamps shhwtstamps = {};
722 		u8 phy_idx = idx + tx->offset;
723 		u64 raw_tstamp = 0, tstamp;
724 		bool drop_ts = !link_up;
725 		struct sk_buff *skb;
726 
727 		/* Drop packets which have waited for more than 2 seconds */
728 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
729 			drop_ts = true;
730 
731 			/* Count the number of Tx timestamps that timed out */
732 			pf->ptp.tx_hwtstamp_timeouts++;
733 		}
734 
735 		/* Only read a timestamp from the PHY if it's marked as ready
736 		 * by the tstamp_ready register. This avoids unnecessary
737 		 * reading of timestamps which are not yet valid. This is
738 		 * important as we must read all timestamps which are valid
739 		 * and only timestamps which are valid during each interrupt.
740 		 * If we do not, the hardware logic for generating a new
741 		 * interrupt can get stuck on some devices.
742 		 */
743 		if (tx->has_ready_bitmap &&
744 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
745 			if (drop_ts)
746 				goto skip_ts_read;
747 
748 			continue;
749 		}
750 
751 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
752 
753 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
754 		if (err && !drop_ts)
755 			continue;
756 
757 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
758 
759 		/* For PHYs which don't implement a proper timestamp ready
760 		 * bitmap, verify that the timestamp value is different
761 		 * from the last cached timestamp. If it is not, skip this for
762 		 * now assuming it hasn't yet been captured by hardware.
763 		 */
764 		if (!drop_ts && !tx->has_ready_bitmap &&
765 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
766 			continue;
767 
768 		/* Discard any timestamp value without the valid bit set */
769 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
770 			drop_ts = true;
771 
772 skip_ts_read:
773 		spin_lock_irqsave(&tx->lock, flags);
774 		if (!tx->has_ready_bitmap && raw_tstamp)
775 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
776 		clear_bit(idx, tx->in_use);
777 		skb = tx->tstamps[idx].skb;
778 		tx->tstamps[idx].skb = NULL;
779 		if (test_and_clear_bit(idx, tx->stale))
780 			drop_ts = true;
781 		spin_unlock_irqrestore(&tx->lock, flags);
782 
783 		/* It is unlikely but possible that the SKB will have been
784 		 * flushed at this point due to link change or teardown.
785 		 */
786 		if (!skb)
787 			continue;
788 
789 		if (drop_ts) {
790 			dev_kfree_skb_any(skb);
791 			continue;
792 		}
793 
794 		/* Extend the timestamp using cached PHC time */
795 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
796 		if (tstamp) {
797 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
798 			ice_trace(tx_tstamp_complete, skb, idx);
799 		}
800 
801 		skb_tstamp_tx(skb, &shhwtstamps);
802 		dev_kfree_skb_any(skb);
803 	}
804 }
805 
806 /**
807  * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
808  * @pf: Board private structure
809  */
810 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
811 {
812 	struct ice_ptp_port *port;
813 	unsigned int i;
814 
815 	mutex_lock(&pf->adapter->ports.lock);
816 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
817 		struct ice_ptp_tx *tx = &port->tx;
818 
819 		if (!tx || !tx->init)
820 			continue;
821 
822 		ice_ptp_process_tx_tstamp(tx);
823 	}
824 	mutex_unlock(&pf->adapter->ports.lock);
825 
826 	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
827 		u64 tstamp_ready;
828 		int err;
829 
830 		/* Read the Tx ready status first */
831 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
832 		if (err)
833 			break;
834 		else if (tstamp_ready)
835 			return ICE_TX_TSTAMP_WORK_PENDING;
836 	}
837 
838 	return ICE_TX_TSTAMP_WORK_DONE;
839 }
840 
841 /**
842  * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
843  * @tx: the Tx timestamp tracker to process
844  *
845  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
846  * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
847  */
848 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
849 {
850 	bool more_timestamps;
851 	unsigned long flags;
852 
853 	if (!tx->init)
854 		return ICE_TX_TSTAMP_WORK_DONE;
855 
856 	/* Process the Tx timestamp tracker */
857 	ice_ptp_process_tx_tstamp(tx);
858 
859 	/* Check if there are outstanding Tx timestamps */
860 	spin_lock_irqsave(&tx->lock, flags);
861 	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
862 	spin_unlock_irqrestore(&tx->lock, flags);
863 
864 	if (more_timestamps)
865 		return ICE_TX_TSTAMP_WORK_PENDING;
866 
867 	return ICE_TX_TSTAMP_WORK_DONE;
868 }
869 
870 /**
871  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
872  * @tx: Tx tracking structure to initialize
873  *
874  * Assumes that the length has already been initialized. Do not call directly,
875  * use one of the ice_ptp_init_tx_* functions instead.
876  */
877 static int
878 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
879 {
880 	unsigned long *in_use, *stale;
881 	struct ice_tx_tstamp *tstamps;
882 
883 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
884 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
885 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
886 
887 	if (!tstamps || !in_use || !stale) {
888 		kfree(tstamps);
889 		bitmap_free(in_use);
890 		bitmap_free(stale);
891 
892 		return -ENOMEM;
893 	}
894 
895 	tx->tstamps = tstamps;
896 	tx->in_use = in_use;
897 	tx->stale = stale;
898 	tx->init = 1;
899 	tx->last_ll_ts_idx_read = -1;
900 
901 	spin_lock_init(&tx->lock);
902 
903 	return 0;
904 }
905 
906 /**
907  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
908  * @pf: Board private structure
909  * @tx: the tracker to flush
910  *
911  * Called during teardown when a Tx tracker is being removed.
912  */
913 static void
914 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
915 {
916 	struct ice_hw *hw = &pf->hw;
917 	unsigned long flags;
918 	u64 tstamp_ready;
919 	int err;
920 	u8 idx;
921 
922 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
923 	if (err) {
924 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
925 			tx->block, err);
926 
927 		/* If we fail to read the Tx timestamp ready bitmap just
928 		 * skip clearing the PHY timestamps.
929 		 */
930 		tstamp_ready = 0;
931 	}
932 
933 	for_each_set_bit(idx, tx->in_use, tx->len) {
934 		u8 phy_idx = idx + tx->offset;
935 		struct sk_buff *skb;
936 
937 		/* In case this timestamp is ready, we need to clear it. */
938 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
939 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
940 
941 		spin_lock_irqsave(&tx->lock, flags);
942 		skb = tx->tstamps[idx].skb;
943 		tx->tstamps[idx].skb = NULL;
944 		clear_bit(idx, tx->in_use);
945 		clear_bit(idx, tx->stale);
946 		spin_unlock_irqrestore(&tx->lock, flags);
947 
948 		/* Count the number of Tx timestamps flushed */
949 		pf->ptp.tx_hwtstamp_flushed++;
950 
951 		/* Free the SKB after we've cleared the bit */
952 		dev_kfree_skb_any(skb);
953 	}
954 }
955 
956 /**
957  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
958  * @tx: the tracker to mark
959  *
960  * Mark currently outstanding Tx timestamps as stale. This prevents sending
961  * their timestamp value to the stack. This is required to prevent extending
962  * the 40bit hardware timestamp incorrectly.
963  *
964  * This should be called when the PTP clock is modified such as after a set
965  * time request.
966  */
967 static void
968 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
969 {
970 	unsigned long flags;
971 
972 	spin_lock_irqsave(&tx->lock, flags);
973 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
974 	spin_unlock_irqrestore(&tx->lock, flags);
975 }
976 
977 /**
978  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
979  * @pf: Board private structure
980  *
981  * Called by the clock owner to flush all the Tx timestamp trackers associated
982  * with the clock.
983  */
984 static void
985 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
986 {
987 	struct ice_ptp_port *port;
988 
989 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
990 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
991 }
992 
993 /**
994  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
995  * @pf: Board private structure
996  * @tx: Tx tracking structure to release
997  *
998  * Free memory associated with the Tx timestamp tracker.
999  */
1000 static void
1001 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
1002 {
1003 	unsigned long flags;
1004 
1005 	spin_lock_irqsave(&tx->lock, flags);
1006 	tx->init = 0;
1007 	spin_unlock_irqrestore(&tx->lock, flags);
1008 
1009 	/* wait for potentially outstanding interrupt to complete */
1010 	synchronize_irq(pf->oicr_irq.virq);
1011 
1012 	ice_ptp_flush_tx_tracker(pf, tx);
1013 
1014 	kfree(tx->tstamps);
1015 	tx->tstamps = NULL;
1016 
1017 	bitmap_free(tx->in_use);
1018 	tx->in_use = NULL;
1019 
1020 	bitmap_free(tx->stale);
1021 	tx->stale = NULL;
1022 
1023 	tx->len = 0;
1024 }
1025 
1026 /**
1027  * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
1028  * @pf: Board private structure
1029  * @tx: the Tx tracking structure to initialize
1030  * @port: the port this structure tracks
1031  *
1032  * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
1033  * have independent memory blocks for all ports.
1034  *
1035  * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
1036  */
1037 static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
1038 				  u8 port)
1039 {
1040 	tx->block = port;
1041 	tx->offset = 0;
1042 	tx->len = INDEX_PER_PORT_ETH56G;
1043 	tx->has_ready_bitmap = 1;
1044 
1045 	return ice_ptp_alloc_tx_tracker(tx);
1046 }
1047 
1048 /**
1049  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
1050  * @pf: Board private structure
1051  * @tx: the Tx tracking structure to initialize
1052  * @port: the port this structure tracks
1053  *
1054  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
1055  * the timestamp block is shared for all ports in the same quad. To avoid
1056  * ports using the same timestamp index, logically break the block of
1057  * registers into chunks based on the port number.
1058  */
1059 static int
1060 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1061 {
1062 	tx->block = ICE_GET_QUAD_NUM(port);
1063 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1064 	tx->len = INDEX_PER_PORT_E82X;
1065 	tx->has_ready_bitmap = 1;
1066 
1067 	return ice_ptp_alloc_tx_tracker(tx);
1068 }
1069 
1070 /**
1071  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1072  * @pf: Board private structure
1073  * @tx: the Tx tracking structure to initialize
1074  *
1075  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1076  * port has its own block of timestamps, independent of the other ports.
1077  */
1078 static int
1079 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1080 {
1081 	tx->block = pf->hw.port_info->lport;
1082 	tx->offset = 0;
1083 	tx->len = INDEX_PER_PORT_E810;
1084 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1085 	 * verify new timestamps against cached copy of the last read
1086 	 * timestamp.
1087 	 */
1088 	tx->has_ready_bitmap = 0;
1089 
1090 	return ice_ptp_alloc_tx_tracker(tx);
1091 }
1092 
1093 /**
1094  * ice_ptp_update_cached_phctime - Update the cached PHC time values
1095  * @pf: Board specific private structure
1096  *
1097  * This function updates the system time values which are cached in the PF
1098  * structure and the Rx rings.
1099  *
1100  * This function must be called periodically to ensure that the cached value
1101  * is never more than 2 seconds old.
1102  *
1103  * Note that the cached copy in the PF PTP structure is always updated, even
1104  * if we can't update the copy in the Rx rings.
1105  *
1106  * Return:
1107  * * 0 - OK, successfully updated
1108  * * -EAGAIN - PF was busy, need to reschedule the update
1109  */
1110 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1111 {
1112 	struct device *dev = ice_pf_to_dev(pf);
1113 	unsigned long update_before;
1114 	u64 systime;
1115 	int i;
1116 
1117 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1118 	if (pf->ptp.cached_phc_time &&
1119 	    time_is_before_jiffies(update_before)) {
1120 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1121 
1122 		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1123 			 jiffies_to_msecs(time_taken));
1124 		pf->ptp.late_cached_phc_updates++;
1125 	}
1126 
1127 	/* Read the current PHC time */
1128 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
1129 
1130 	/* Update the cached PHC time stored in the PF structure */
1131 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1132 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1133 
1134 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1135 		return -EAGAIN;
1136 
1137 	ice_for_each_vsi(pf, i) {
1138 		struct ice_vsi *vsi = pf->vsi[i];
1139 		int j;
1140 
1141 		if (!vsi)
1142 			continue;
1143 
1144 		if (vsi->type != ICE_VSI_PF)
1145 			continue;
1146 
1147 		ice_for_each_rxq(vsi, j) {
1148 			if (!vsi->rx_rings[j])
1149 				continue;
1150 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1151 		}
1152 	}
1153 	clear_bit(ICE_CFG_BUSY, pf->state);
1154 
1155 	return 0;
1156 }
1157 
1158 /**
1159  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1160  * @pf: Board specific private structure
1161  *
1162  * This function must be called when the cached PHC time is no longer valid,
1163  * such as after a time adjustment. It marks any currently outstanding Tx
1164  * timestamps as stale and updates the cached PHC time for both the PF and Rx
1165  * rings.
1166  *
1167  * If updating the PHC time cannot be done immediately, a warning message is
1168  * logged and the work item is scheduled immediately to minimize the window
1169  * with a wrong cached timestamp.
1170  */
1171 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1172 {
1173 	struct device *dev = ice_pf_to_dev(pf);
1174 	int err;
1175 
1176 	/* Update the cached PHC time immediately if possible, otherwise
1177 	 * schedule the work item to execute soon.
1178 	 */
1179 	err = ice_ptp_update_cached_phctime(pf);
1180 	if (err) {
1181 		/* If another thread is updating the Rx rings, we won't
1182 		 * properly reset them here. This could lead to reporting of
1183 		 * invalid timestamps, but there isn't much we can do.
1184 		 */
1185 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1186 			 __func__);
1187 
1188 		/* Queue the work item to update the Rx rings when possible */
1189 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1190 					   msecs_to_jiffies(10));
1191 	}
1192 
1193 	/* Mark any outstanding timestamps as stale, since they might have
1194 	 * been captured in hardware before the time update. This could lead
1195 	 * to us extending them with the wrong cached value resulting in
1196 	 * incorrect timestamp values.
1197 	 */
1198 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1199 }
1200 
1201 /**
1202  * ice_ptp_write_init - Set PHC time to provided value
1203  * @pf: Board private structure
1204  * @ts: timespec structure that holds the new time value
1205  *
1206  * Set the PHC time to the specified time provided in the timespec.
1207  */
1208 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1209 {
1210 	u64 ns = timespec64_to_ns(ts);
1211 	struct ice_hw *hw = &pf->hw;
1212 
1213 	return ice_ptp_init_time(hw, ns);
1214 }
1215 
1216 /**
1217  * ice_ptp_write_adj - Adjust PHC clock time atomically
1218  * @pf: Board private structure
1219  * @adj: Adjustment in nanoseconds
1220  *
1221  * Perform an atomic adjustment of the PHC time by the specified number of
1222  * nanoseconds.
1223  */
1224 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1225 {
1226 	struct ice_hw *hw = &pf->hw;
1227 
1228 	return ice_ptp_adj_clock(hw, adj);
1229 }
1230 
1231 /**
1232  * ice_base_incval - Get base timer increment value
1233  * @pf: Board private structure
1234  *
1235  * Look up the base timer increment value for this device. The base increment
1236  * value is used to define the nominal clock tick rate. This increment value
1237  * is programmed during device initialization. It is also used as the basis
1238  * for calculating adjustments using scaled_ppm.
1239  */
1240 static u64 ice_base_incval(struct ice_pf *pf)
1241 {
1242 	struct ice_hw *hw = &pf->hw;
1243 	u64 incval;
1244 
1245 	incval = ice_get_base_incval(hw);
1246 
1247 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1248 		incval);
1249 
1250 	return incval;
1251 }
1252 
1253 /**
1254  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1255  * @port: PTP port for which Tx FIFO is checked
1256  */
1257 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1258 {
1259 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1260 	int quad = ICE_GET_QUAD_NUM(port->port_num);
1261 	struct ice_pf *pf;
1262 	struct ice_hw *hw;
1263 	u32 val, phy_sts;
1264 	int err;
1265 
1266 	pf = ptp_port_to_pf(port);
1267 	hw = &pf->hw;
1268 
1269 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1270 		return 0;
1271 
1272 	/* need to read FIFO state */
1273 	if (offs == 0 || offs == 1)
1274 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1275 					     &val);
1276 	else
1277 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1278 					     &val);
1279 
1280 	if (err) {
1281 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1282 			port->port_num, err);
1283 		return err;
1284 	}
1285 
1286 	if (offs & 0x1)
1287 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1288 	else
1289 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1290 
1291 	if (phy_sts & FIFO_EMPTY) {
1292 		port->tx_fifo_busy_cnt = FIFO_OK;
1293 		return 0;
1294 	}
1295 
1296 	port->tx_fifo_busy_cnt++;
1297 
1298 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1299 		port->tx_fifo_busy_cnt, port->port_num);
1300 
1301 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1302 		dev_dbg(ice_pf_to_dev(pf),
1303 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1304 			port->port_num, quad);
1305 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1306 		port->tx_fifo_busy_cnt = FIFO_OK;
1307 		return 0;
1308 	}
1309 
1310 	return -EAGAIN;
1311 }
1312 
1313 /**
1314  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1315  * @work: Pointer to the kthread_work structure for this task
1316  *
1317  * Check whether hardware has completed measuring the Tx and Rx offset values
1318  * used to configure and enable vernier timestamp calibration.
1319  *
1320  * Once the offset in either direction is measured, configure the associated
1321  * registers with the calibrated offset values and enable timestamping. The Tx
1322  * and Rx directions are configured independently as soon as their associated
1323  * offsets are known.
1324  *
1325  * This function reschedules itself until both Tx and Rx calibration have
1326  * completed.
1327  */
1328 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1329 {
1330 	struct ice_ptp_port *port;
1331 	struct ice_pf *pf;
1332 	struct ice_hw *hw;
1333 	int tx_err;
1334 	int rx_err;
1335 
1336 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1337 	pf = ptp_port_to_pf(port);
1338 	hw = &pf->hw;
1339 
1340 	if (ice_is_reset_in_progress(pf->state)) {
1341 		/* wait for device driver to complete reset */
1342 		kthread_queue_delayed_work(pf->ptp.kworker,
1343 					   &port->ov_work,
1344 					   msecs_to_jiffies(100));
1345 		return;
1346 	}
1347 
1348 	tx_err = ice_ptp_check_tx_fifo(port);
1349 	if (!tx_err)
1350 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1351 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1352 	if (tx_err || rx_err) {
1353 		/* Tx and/or Rx offset not yet configured, try again later */
1354 		kthread_queue_delayed_work(pf->ptp.kworker,
1355 					   &port->ov_work,
1356 					   msecs_to_jiffies(100));
1357 		return;
1358 	}
1359 }
1360 
1361 /**
1362  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1363  * @ptp_port: PTP port to stop
1364  */
1365 static int
1366 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1367 {
1368 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1369 	u8 port = ptp_port->port_num;
1370 	struct ice_hw *hw = &pf->hw;
1371 	int err;
1372 
1373 	if (ice_is_e810(hw))
1374 		return 0;
1375 
1376 	mutex_lock(&ptp_port->ps_lock);
1377 
1378 	switch (ice_get_phy_model(hw)) {
1379 	case ICE_PHY_ETH56G:
1380 		err = ice_stop_phy_timer_eth56g(hw, port, true);
1381 		break;
1382 	case ICE_PHY_E82X:
1383 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1384 
1385 		err = ice_stop_phy_timer_e82x(hw, port, true);
1386 		break;
1387 	default:
1388 		err = -ENODEV;
1389 	}
1390 	if (err && err != -EBUSY)
1391 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1392 			port, err);
1393 
1394 	mutex_unlock(&ptp_port->ps_lock);
1395 
1396 	return err;
1397 }
1398 
1399 /**
1400  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1401  * @ptp_port: PTP port for which the PHY start is set
1402  *
1403  * Start the PHY timestamping block, and initiate Vernier timestamping
1404  * calibration. If timestamping cannot be calibrated (such as if link is down)
1405  * then disable the timestamping block instead.
1406  */
1407 static int
1408 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1409 {
1410 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1411 	u8 port = ptp_port->port_num;
1412 	struct ice_hw *hw = &pf->hw;
1413 	unsigned long flags;
1414 	int err;
1415 
1416 	if (ice_is_e810(hw))
1417 		return 0;
1418 
1419 	if (!ptp_port->link_up)
1420 		return ice_ptp_port_phy_stop(ptp_port);
1421 
1422 	mutex_lock(&ptp_port->ps_lock);
1423 
1424 	switch (ice_get_phy_model(hw)) {
1425 	case ICE_PHY_ETH56G:
1426 		err = ice_start_phy_timer_eth56g(hw, port);
1427 		break;
1428 	case ICE_PHY_E82X:
1429 		/* Start the PHY timer in Vernier mode */
1430 		kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1431 
1432 		/* temporarily disable Tx timestamps while calibrating
1433 		 * PHY offset
1434 		 */
1435 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1436 		ptp_port->tx.calibrating = true;
1437 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1438 		ptp_port->tx_fifo_busy_cnt = 0;
1439 
1440 		/* Start the PHY timer in Vernier mode */
1441 		err = ice_start_phy_timer_e82x(hw, port);
1442 		if (err)
1443 			break;
1444 
1445 		/* Enable Tx timestamps right away */
1446 		spin_lock_irqsave(&ptp_port->tx.lock, flags);
1447 		ptp_port->tx.calibrating = false;
1448 		spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1449 
1450 		kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
1451 					   0);
1452 		break;
1453 	default:
1454 		err = -ENODEV;
1455 	}
1456 
1457 	if (err)
1458 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1459 			port, err);
1460 
1461 	mutex_unlock(&ptp_port->ps_lock);
1462 
1463 	return err;
1464 }
1465 
1466 /**
1467  * ice_ptp_link_change - Reconfigure PTP after link status change
1468  * @pf: Board private structure
1469  * @linkup: Link is up or down
1470  */
1471 void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
1472 {
1473 	struct ice_ptp_port *ptp_port;
1474 	struct ice_hw *hw = &pf->hw;
1475 
1476 	if (pf->ptp.state != ICE_PTP_READY)
1477 		return;
1478 
1479 	ptp_port = &pf->ptp.port;
1480 
1481 	/* Update cached link status for this port immediately */
1482 	ptp_port->link_up = linkup;
1483 
1484 	/* Skip HW writes if reset is in progress */
1485 	if (pf->hw.reset_ongoing)
1486 		return;
1487 	switch (ice_get_phy_model(hw)) {
1488 	case ICE_PHY_E810:
1489 		/* Do not reconfigure E810 PHY */
1490 		return;
1491 	case ICE_PHY_ETH56G:
1492 	case ICE_PHY_E82X:
1493 		ice_ptp_port_phy_restart(ptp_port);
1494 		return;
1495 	default:
1496 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1497 	}
1498 }
1499 
1500 /**
1501  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1502  * @pf: PF private structure
1503  * @ena: bool value to enable or disable interrupt
1504  * @threshold: Minimum number of packets at which intr is triggered
1505  *
1506  * Utility function to configure all the PHY interrupt settings, including
1507  * whether the PHY interrupt is enabled, and what threshold to use. Also
1508  * configures the E82X timestamp owner to react to interrupts from all PHYs.
1509  *
1510  * Return: 0 on success, -EOPNOTSUPP when the PHY model is unsupported, or
1511  * another error code when configuring the E82X PHY interrupt fails
1512  */
1513 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1514 {
1515 	struct device *dev = ice_pf_to_dev(pf);
1516 	struct ice_hw *hw = &pf->hw;
1517 
1518 	ice_ptp_reset_ts_memory(hw);
1519 
1520 	switch (ice_get_phy_model(hw)) {
1521 	case ICE_PHY_ETH56G: {
1522 		int port;
1523 
1524 		for (port = 0; port < hw->ptp.num_lports; port++) {
1525 			int err;
1526 
1527 			err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
1528 			if (err) {
1529 				dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
1530 					port, err);
1531 				return err;
1532 			}
1533 		}
1534 
1535 		return 0;
1536 	}
1537 	case ICE_PHY_E82X: {
1538 		int quad;
1539 
1540 		for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
1541 		     quad++) {
1542 			int err;
1543 
1544 			err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
1545 			if (err) {
1546 				dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
1547 					quad, err);
1548 				return err;
1549 			}
1550 		}
1551 
1552 		return 0;
1553 	}
1554 	case ICE_PHY_E810:
1555 		return 0;
1556 	case ICE_PHY_UNSUP:
1557 	default:
1558 		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
1559 			 ice_get_phy_model(hw));
1560 		return -EOPNOTSUPP;
1561 	}
1562 }
1563 
1564 /**
1565  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1566  * @pf: Board private structure
1567  */
1568 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1569 {
1570 	ice_ptp_port_phy_restart(&pf->ptp.port);
1571 }
1572 
1573 /**
1574  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1575  * @pf: Board private structure
1576  */
1577 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1578 {
1579 	struct list_head *entry;
1580 
1581 	list_for_each(entry, &pf->adapter->ports.ports) {
1582 		struct ice_ptp_port *port = list_entry(entry,
1583 						       struct ice_ptp_port,
1584 						       list_node);
1585 
1586 		if (port->link_up)
1587 			ice_ptp_port_phy_restart(port);
1588 	}
1589 }
1590 
1591 /**
1592  * ice_ptp_adjfine - Adjust clock increment rate
1593  * @info: the driver's PTP info structure
1594  * @scaled_ppm: Parts per million with 16-bit fractional field
1595  *
1596  * Adjust the frequency of the clock by the indicated scaled ppm from the
1597  * base frequency.
1598  */
1599 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1600 {
1601 	struct ice_pf *pf = ptp_info_to_pf(info);
1602 	struct ice_hw *hw = &pf->hw;
1603 	u64 incval;
1604 	int err;
1605 
1606 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1607 	err = ice_ptp_write_incval_locked(hw, incval);
1608 	if (err) {
1609 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1610 			err);
1611 		return -EIO;
1612 	}
1613 
1614 	return 0;
1615 }
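
/* Worked example, assuming the standard scaled_ppm convention used by
 * adjust_by_scaled_ppm(): scaled_ppm carries a 16-bit fractional field, so
 * scaled_ppm = 65536 requests a +1 ppm frequency offset, and the new
 * increment value is roughly base * (1 + scaled_ppm / (65536 * 1000000)).
 */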
1616 
1617 /**
1618  * ice_ptp_extts_event - Process PTP external clock event
1619  * @pf: Board private structure
1620  */
1621 void ice_ptp_extts_event(struct ice_pf *pf)
1622 {
1623 	struct ptp_clock_event event;
1624 	struct ice_hw *hw = &pf->hw;
1625 	u8 chan, tmr_idx;
1626 	u32 hi, lo;
1627 
1628 	/* Don't process timestamp events if PTP is not ready */
1629 	if (pf->ptp.state != ICE_PTP_READY)
1630 		return;
1631 
1632 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1633 	/* Event time is captured by one of the two matched registers
1634 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1635 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1636 	 * Event is defined in GLTSYN_EVNT_0 register
1637 	 */
1638 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1639 		/* Check if channel is enabled */
1640 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
1641 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1642 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1643 			event.timestamp = (((u64)hi) << 32) | lo;
1644 			event.type = PTP_CLOCK_EXTTS;
1645 			event.index = chan;
1646 
1647 			/* Fire event */
1648 			ptp_clock_event(pf->ptp.clock, &event);
1649 			pf->ptp.ext_ts_irq &= ~(1 << chan);
1650 		}
1651 	}
1652 }
1653 
1654 /**
1655  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1656  * @pf: Board private structure
1657  * @chan: GPIO channel (0-3)
1658  * @config: desired EXTTS configuration.
1659  * @store: If set to true, the values will be stored
1660  *
1661  * Configure an external timestamp event on the requested channel.
1662  *
1663  * Return: 0 on success, -EOPNOTSUPP on unsupported flags
1664  */
1665 static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
1666 			     struct ice_extts_channel *config, bool store)
1667 {
1668 	u32 func, aux_reg, gpio_reg, irq_reg;
1669 	struct ice_hw *hw = &pf->hw;
1670 	u8 tmr_idx;
1671 
1672 	/* Reject requests with unsupported flags */
1673 	if (config->flags & ~(PTP_ENABLE_FEATURE |
1674 			      PTP_RISING_EDGE |
1675 			      PTP_FALLING_EDGE |
1676 			      PTP_STRICT_FLAGS))
1677 		return -EOPNOTSUPP;
1678 
1679 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1680 
1681 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1682 
1683 	if (config->ena) {
1684 		/* Enable the interrupt */
1685 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1686 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1687 
1688 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1689 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1690 
1691 		/* set event level to requested edge */
1692 		if (config->flags & PTP_FALLING_EDGE)
1693 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1694 		if (config->flags & PTP_RISING_EDGE)
1695 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1696 
1697 		/* Write GPIO CTL reg.
1698 		 * 0x1 is input sampled by EVENT register(channel)
1699 		 * + num_in_channels * tmr_idx
1700 		 */
1701 		func = 1 + chan + (tmr_idx * 3);
1702 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1703 		pf->ptp.ext_ts_chan |= (1 << chan);
1704 	} else {
1705 		/* clear the values we set to reset defaults */
1706 		aux_reg = 0;
1707 		gpio_reg = 0;
1708 		pf->ptp.ext_ts_chan &= ~(1 << chan);
1709 		if (!pf->ptp.ext_ts_chan)
1710 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1711 	}
1712 
1713 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1714 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1715 	wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
1716 
1717 	if (store)
1718 		memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
1719 
1720 	return 0;
1721 }
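
/*
 * Usage sketch (not part of the driver): an EXTTS request normally reaches
 * ice_ptp_cfg_extts() from user space via the PTP character device. A
 * minimal example, assuming /dev/ptp0 is this clock and channel 0 is routed
 * to the desired pin:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ptp_clock.h>
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	struct ptp_extts_request req = {
 *		.index = 0,
 *		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *	};
 *	struct ptp_extts_event ev;
 *
 *	ioctl(fd, PTP_EXTTS_REQUEST, &req);
 *	read(fd, &ev, sizeof(ev));
 *
 * The read() blocks until ice_ptp_extts_event() reports a captured edge
 * through ptp_clock_event(), which the PTP core queues as one
 * struct ptp_extts_event.
 */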
1722 
1723 /**
1724  * ice_ptp_disable_all_extts - Disable all EXTTS channels
1725  * @pf: Board private structure
1726  */
1727 static void ice_ptp_disable_all_extts(struct ice_pf *pf)
1728 {
1729 	struct ice_extts_channel extts_cfg = {};
1730 	int i;
1731 
1732 	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1733 		if (pf->ptp.extts_channels[i].ena) {
1734 			extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
1735 			extts_cfg.ena = false;
1736 			ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
1737 		}
1738 	}
1739 
1740 	synchronize_irq(pf->oicr_irq.virq);
1741 }
1742 
1743 /**
1744  * ice_ptp_enable_all_extts - Enable all EXTTS channels
1745  * @pf: Board private structure
1746  *
1747  * Called during reset to restore user configuration.
1748  */
1749 static void ice_ptp_enable_all_extts(struct ice_pf *pf)
1750 {
1751 	int i;
1752 
1753 	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
1754 		if (pf->ptp.extts_channels[i].ena)
1755 			ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
1756 					  false);
1757 	}
1758 }
1759 
1760 /**
1761  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1762  * @pf: Board private structure
1763  * @chan: GPIO channel (0-3)
1764  * @config: desired periodic clk configuration. NULL will disable channel
1765  * @store: If set to true the values will be stored
1766  *
1767  * Configure the internal clock generator modules to generate the clock wave of
1768  * specified period.
1769  */
1770 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
1771 			      struct ice_perout_channel *config, bool store)
1772 {
1773 	u64 current_time, period, start_time, phase;
1774 	struct ice_hw *hw = &pf->hw;
1775 	u32 func, val, gpio_pin;
1776 	u8 tmr_idx;
1777 
1778 	if (config && config->flags & ~PTP_PEROUT_PHASE)
1779 		return -EOPNOTSUPP;
1780 
1781 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1782 
1783 	/* 0. Reset mode & out_en in AUX_OUT */
1784 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1785 
1786 	/* If we're disabling the output, clear out CLKO and TGT and keep
1787 	 * output level low
1788 	 */
1789 	if (!config || !config->ena) {
1790 		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
1791 		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
1792 		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
1793 
1794 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
1795 		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
1796 		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1797 
1798 		/* Store the value if requested */
1799 		if (store)
1800 			memset(&pf->ptp.perout_channels[chan], 0,
1801 			       sizeof(struct ice_perout_channel));
1802 
1803 		return 0;
1804 	}
1805 	period = config->period;
1806 	start_time = config->start_time;
1807 	div64_u64_rem(start_time, period, &phase);
1808 	gpio_pin = config->gpio_pin;
1809 
1810 	/* 1. Write clkout with half of required period value */
1811 	if (period & 0x1) {
1812 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1813 		goto err;
1814 	}
1815 
1816 	period >>= 1;
1817 
1818 	/* For proper operation, the GLTSYN_CLKO must be larger than the clock tick
1819 	 */
1820 #define MIN_PULSE 3
1821 	if (period <= MIN_PULSE || period > U32_MAX) {
1822 		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
1823 			MIN_PULSE * 2);
1824 		goto err;
1825 	}
1826 
1827 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1828 
1829 	/* Allow time for programming before start_time is hit */
1830 	current_time = ice_ptp_read_src_clk_reg(pf, NULL);
1831 
1832 	/* if start time is in the past start the timer at the nearest second
1833 	 * maintaining phase
1834 	 */
1835 	if (start_time < current_time)
1836 		start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
1837 
1838 	if (ice_is_e810(hw))
1839 		start_time -= E810_OUT_PROP_DELAY_NS;
1840 	else
1841 		start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
1842 
1843 	/* 2. Write TARGET time */
1844 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
1845 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
1846 
1847 	/* 3. Write AUX_OUT register */
1848 	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1849 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1850 
1851 	/* 4. write GPIO CTL reg */
1852 	func = 8 + chan + (tmr_idx * 4);
1853 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
1854 	      FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1855 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1856 
1857 	/* Store the value if requested */
1858 	if (store) {
1859 		memcpy(&pf->ptp.perout_channels[chan], config,
1860 		       sizeof(struct ice_perout_channel));
1861 		pf->ptp.perout_channels[chan].start_time = phase;
1862 	}
1863 
1864 	return 0;
1865 err:
1866 	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
1867 	return -EFAULT;
1868 }
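
/*
 * Usage sketch (not part of the driver): a periodic output handled by
 * ice_ptp_cfg_clkout() is typically requested from user space with the
 * PTP_PEROUT_REQUEST ioctl. A minimal example generating a 1 Hz wave, where
 * fd is an open /dev/ptpX descriptor and start_sec is an illustrative start
 * time a few seconds in the future:
 *
 *	struct ptp_perout_request req = {
 *		.index = 0,
 *		.start = { .sec = start_sec, .nsec = 0 },
 *		.period = { .sec = 1, .nsec = 0 },
 *	};
 *
 *	ioctl(fd, PTP_PEROUT_REQUEST, &req);
 *
 * As checked above, the period in nanoseconds must be even, and its half
 * must both exceed MIN_PULSE and fit in 32 bits, or the request fails.
 */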
1869 
1870 /**
1871  * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1872  * @pf: pointer to the PF structure
1873  *
1874  * Disable all currently configured clock outputs. This is necessary before
1875  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1876  * re-enable the clocks again.
1877  */
1878 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1879 {
1880 	uint i;
1881 
1882 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1883 		if (pf->ptp.perout_channels[i].ena)
1884 			ice_ptp_cfg_clkout(pf, i, NULL, false);
1885 }
1886 
1887 /**
1888  * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1889  * @pf: pointer to the PF structure
1890  *
1891  * Enable all currently configured clock outputs. Use this after
1892  * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1893  * their configuration.
1894  */
1895 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1896 {
1897 	uint i;
1898 
1899 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1900 		if (pf->ptp.perout_channels[i].ena)
1901 			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1902 					   false);
1903 }
1904 
1905 /**
1906  * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1907  * @info: the driver's PTP info structure
1908  * @rq: The requested feature to change
1909  * @on: Enable/disable flag
1910  */
1911 static int
1912 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1913 			 struct ptp_clock_request *rq, int on)
1914 {
1915 	struct ice_pf *pf = ptp_info_to_pf(info);
1916 	bool sma_pres = false;
1917 	unsigned int chan;
1918 	u32 gpio_pin;
1919 
1920 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1921 		sma_pres = true;
1922 
1923 	switch (rq->type) {
1924 	case PTP_CLK_REQ_PEROUT:
1925 	{
1926 		struct ice_perout_channel clk_cfg = {};
1927 
1928 		chan = rq->perout.index;
1929 		if (sma_pres) {
1930 			if (chan == ice_pin_desc_e810t[SMA1].chan)
1931 				clk_cfg.gpio_pin = GPIO_20;
1932 			else if (chan == ice_pin_desc_e810t[SMA2].chan)
1933 				clk_cfg.gpio_pin = GPIO_22;
1934 			else
1935 				return -1;
1936 		} else if (ice_is_e810t(&pf->hw)) {
1937 			if (chan == 0)
1938 				clk_cfg.gpio_pin = GPIO_20;
1939 			else
1940 				clk_cfg.gpio_pin = GPIO_22;
1941 		} else if (chan == PPS_CLK_GEN_CHAN) {
1942 			clk_cfg.gpio_pin = PPS_PIN_INDEX;
1943 		} else {
1944 			clk_cfg.gpio_pin = chan;
1945 		}
1946 
1947 		clk_cfg.flags = rq->perout.flags;
1948 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1949 				   rq->perout.period.nsec);
1950 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1951 				       rq->perout.start.nsec);
1952 		clk_cfg.ena = !!on;
1953 
1954 		return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1955 	}
1956 	case PTP_CLK_REQ_EXTTS:
1957 	{
1958 		struct ice_extts_channel extts_cfg = {};
1959 
1960 		chan = rq->extts.index;
1961 		if (sma_pres) {
1962 			if (chan < ice_pin_desc_e810t[SMA2].chan)
1963 				gpio_pin = GPIO_21;
1964 			else
1965 				gpio_pin = GPIO_23;
1966 		} else if (ice_is_e810t(&pf->hw)) {
1967 			if (chan == 0)
1968 				gpio_pin = GPIO_21;
1969 			else
1970 				gpio_pin = GPIO_23;
1971 		} else {
1972 			gpio_pin = chan;
1973 		}
1974 
1975 		extts_cfg.flags = rq->extts.flags;
1976 		extts_cfg.gpio_pin = gpio_pin;
1977 		extts_cfg.ena = !!on;
1978 
1979 		return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
1980 	}
1981 	default:
1982 		return -EOPNOTSUPP;
1983 	}
1984 }
1985 
1986 /**
1987  * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
1988  * @info: the driver's PTP info structure
1989  * @rq: The requested feature to change
1990  * @on: Enable/disable flag
1991  */
1992 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
1993 				    struct ptp_clock_request *rq, int on)
1994 {
1995 	struct ice_pf *pf = ptp_info_to_pf(info);
1996 
1997 	switch (rq->type) {
1998 	case PTP_CLK_REQ_PPS:
1999 	{
2000 		struct ice_perout_channel clk_cfg = {};
2001 
2002 		clk_cfg.flags = rq->perout.flags;
2003 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
2004 		clk_cfg.period = NSEC_PER_SEC;
2005 		clk_cfg.ena = !!on;
2006 
2007 		return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
2008 	}
2009 	case PTP_CLK_REQ_EXTTS:
2010 	{
2011 		struct ice_extts_channel extts_cfg = {};
2012 
2013 		extts_cfg.flags = rq->extts.flags;
2014 		extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
2015 		extts_cfg.ena = !!on;
2016 
2017 		return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
2018 	}
2019 	default:
2020 		return -EOPNOTSUPP;
2021 	}
2022 }
2023 
2024 /**
2025  * ice_ptp_gettimex64 - Get the time of the clock
2026  * @info: the driver's PTP info structure
2027  * @ts: timespec64 structure to hold the current time value
2028  * @sts: Optional parameter for holding a pair of system timestamps from
2029  *       the system clock. Will be ignored if NULL is given.
2030  *
2031  * Read the device clock and return the correct value in ns, after converting it
2032  * into a timespec struct.
2033  */
2034 static int
2035 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
2036 		   struct ptp_system_timestamp *sts)
2037 {
2038 	struct ice_pf *pf = ptp_info_to_pf(info);
2039 	u64 time_ns;
2040 
2041 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
2042 	*ts = ns_to_timespec64(time_ns);
2043 	return 0;
2044 }
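
/*
 * Usage sketch (not part of the driver): gettimex64 backs clock_gettime()
 * on the dynamic POSIX clock exported for this device. A minimal read of
 * the PHC, using the FD_TO_CLOCKID() convention from the kernel's testptp
 * tool:
 *
 *	#define CLOCKFD 3
 *	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	struct timespec ts;
 *
 *	clock_gettime(FD_TO_CLOCKID(fd), &ts);
 */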
2045 
2046 /**
2047  * ice_ptp_settime64 - Set the time of the clock
2048  * @info: the driver's PTP info structure
2049  * @ts: timespec64 structure that holds the new time value
2050  *
2051  * Set the device clock to the user input value. The conversion from timespec
2052  * to ns happens in the write function.
2053  */
2054 static int
2055 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
2056 {
2057 	struct ice_pf *pf = ptp_info_to_pf(info);
2058 	struct timespec64 ts64 = *ts;
2059 	struct ice_hw *hw = &pf->hw;
2060 	int err;
2061 
2062 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
2063 	 * Start with marking timestamps as invalid.
2064 	 */
2065 	if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
2066 		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
2067 		if (err)
2068 			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
2069 	}
2070 
2071 	if (!ice_ptp_lock(hw)) {
2072 		err = -EBUSY;
2073 		goto exit;
2074 	}
2075 
2076 	/* Disable periodic outputs */
2077 	ice_ptp_disable_all_clkout(pf);
2078 
2079 	err = ice_ptp_write_init(pf, &ts64);
2080 	ice_ptp_unlock(hw);
2081 
2082 	if (!err)
2083 		ice_ptp_reset_cached_phctime(pf);
2084 
2085 	/* Reenable periodic outputs */
2086 	ice_ptp_enable_all_clkout(pf);
2087 
2088 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
2089 	if (ice_get_phy_model(hw) == ICE_PHY_E82X)
2090 		ice_ptp_restart_all_phy(pf);
2091 exit:
2092 	if (err) {
2093 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
2094 		return err;
2095 	}
2096 
2097 	return 0;
2098 }
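
/*
 * Usage sketch (not part of the driver): settime64 is reached through
 * clock_settime() on the same dynamic clock id, e.g. to set the PHC to the
 * current system time:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	clock_settime(FD_TO_CLOCKID(fd), &ts);
 *
 * FD_TO_CLOCKID() follows the same convention shown after
 * ice_ptp_gettimex64() above.
 */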
2099 
2100 /**
2101  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
2102  * @info: the driver's PTP info structure
2103  * @delta: Offset in nanoseconds to adjust the time by
2104  */
2105 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2106 {
2107 	struct timespec64 now, then;
2108 	int ret;
2109 
2110 	then = ns_to_timespec64(delta);
2111 	ret = ice_ptp_gettimex64(info, &now, NULL);
2112 	if (ret)
2113 		return ret;
2114 	now = timespec64_add(now, then);
2115 
2116 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2117 }
2118 
2119 /**
2120  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
2121  * @info: the driver's PTP info structure
2122  * @delta: Offset in nanoseconds to adjust the time by
2123  */
2124 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
2125 {
2126 	struct ice_pf *pf = ptp_info_to_pf(info);
2127 	struct ice_hw *hw = &pf->hw;
2128 	struct device *dev;
2129 	int err;
2130 
2131 	dev = ice_pf_to_dev(pf);
2132 
2133 	/* Hardware only supports atomic adjustments using signed 32-bit
2134 	 * integers. For any adjustment outside this range, perform
2135 	 * a non-atomic get->adjust->set flow.
2136 	 */
2137 	if (delta > S32_MAX || delta < S32_MIN) {
2138 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2139 		return ice_ptp_adjtime_nonatomic(info, delta);
2140 	}
2141 
2142 	if (!ice_ptp_lock(hw)) {
2143 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2144 		return -EBUSY;
2145 	}
2146 
2147 	/* Disable periodic outputs */
2148 	ice_ptp_disable_all_clkout(pf);
2149 
2150 	err = ice_ptp_write_adj(pf, delta);
2151 
2152 	/* Reenable periodic outputs */
2153 	ice_ptp_enable_all_clkout(pf);
2154 
2155 	ice_ptp_unlock(hw);
2156 
2157 	if (err) {
2158 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2159 		return err;
2160 	}
2161 
2162 	ice_ptp_reset_cached_phctime(pf);
2163 
2164 	return 0;
2165 }
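
/*
 * Usage sketch (not part of the driver): adjtime is reached through
 * clock_adjtime() with ADJ_SETOFFSET, e.g. stepping the PHC back by 500 ms
 * (clkid assumed to be derived from the /dev/ptpX descriptor; negative
 * offsets use a negative tv_sec with a non-negative tv_usec):
 *
 *	struct timex tx = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time = { .tv_sec = -1, .tv_usec = 500000000 },
 *	};
 *
 *	clock_adjtime(clkid, &tx);
 *
 * Offsets within about +/- 2.1 s (S32_MAX ns) take the atomic path above;
 * larger deltas fall back to the non-atomic get, adjust, set flow.
 */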
2166 
2167 #ifdef CONFIG_ICE_HWTS
2168 /**
2169  * ice_ptp_get_syncdevicetime - Get the cross time stamp info
2170  * @device: Current device time
2171  * @system: System counter value read synchronously with device time
2172  * @ctx: Context provided by timekeeping code
2173  *
2174  * Read device and system (ART) clock simultaneously and return the corrected
2175  * clock values in ns.
2176  */
2177 static int
2178 ice_ptp_get_syncdevicetime(ktime_t *device,
2179 			   struct system_counterval_t *system,
2180 			   void *ctx)
2181 {
2182 	struct ice_pf *pf = (struct ice_pf *)ctx;
2183 	struct ice_hw *hw = &pf->hw;
2184 	u32 hh_lock, hh_art_ctl;
2185 	int i;
2186 
2187 #define MAX_HH_HW_LOCK_TRIES	5
2188 #define MAX_HH_CTL_LOCK_TRIES	100
2189 
2190 	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2191 		/* Get the HW lock */
2192 		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2193 		if (hh_lock & PFHH_SEM_BUSY_M) {
2194 			usleep_range(10000, 15000);
2195 			continue;
2196 		}
2197 		break;
2198 	}
2199 	if (hh_lock & PFHH_SEM_BUSY_M) {
2200 		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2201 		return -EBUSY;
2202 	}
2203 
2204 	/* Program cmd to master timer */
2205 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2206 
2207 	/* Start the ART and device clock sync sequence */
2208 	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2209 	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
2210 	wr32(hw, GLHH_ART_CTL, hh_art_ctl);
2211 
2212 	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2213 		/* Wait for sync to complete */
2214 		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2215 		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2216 			udelay(1);
2217 			continue;
2218 		} else {
2219 			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2220 			u64 hh_ts;
2221 
2222 			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2223 			/* Read ART time */
2224 			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2225 			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2226 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2227 			system->cycles = hh_ts;
2228 			system->cs_id = CSID_X86_ART;
2229 			system->use_nsecs = true;
2230 			/* Read Device source clock time */
2231 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2232 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2233 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2234 			*device = ns_to_ktime(hh_ts);
2235 			break;
2236 		}
2237 	}
2238 
2239 	/* Clear the master timer */
2240 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2241 
2242 	/* Release HW lock */
2243 	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2244 	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
2245 	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2246 
2247 	if (i == MAX_HH_CTL_LOCK_TRIES)
2248 		return -ETIMEDOUT;
2249 
2250 	return 0;
2251 }
2252 
2253 /**
2254  * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2255  * @info: the driver's PTP info structure
2256  * @cts: The memory to fill the cross timestamp info
2257  *
2258  * Capture a cross timestamp between the ART and the device PTP hardware
2259  * clock. Fill the cross timestamp information and report it back to the
2260  * caller.
2261  *
2262  * This is only valid for E822 and E823 devices which have support for
2263  * generating the cross timestamp via PCIe PTM.
2264  *
2265  * In order to correctly correlate the ART timestamp back to the TSC time, the
2266  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2267  */
2268 static int
2269 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2270 			    struct system_device_crosststamp *cts)
2271 {
2272 	struct ice_pf *pf = ptp_info_to_pf(info);
2273 
2274 	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2275 					     pf, NULL, cts);
2276 }
2277 #endif /* CONFIG_ICE_HWTS */
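
/*
 * Usage sketch (not part of the driver): when getcrosststamp is provided,
 * user space can request a PTM-based cross timestamp with the
 * PTP_SYS_OFFSET_PRECISE ioctl, which the PTP core services through
 * get_device_system_crosststamp():
 *
 *	struct ptp_sys_offset_precise precise = {};
 *
 *	ioctl(fd, PTP_SYS_OFFSET_PRECISE, &precise);
 *
 * On success, precise.device and precise.sys_realtime hold the matched
 * device and system clock readings.
 */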
2278 
2279 /**
2280  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2281  * @pf: Board private structure
2282  * @ifr: ioctl data
2283  *
2284  * Copy the timestamping config to user buffer
2285  */
2286 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2287 {
2288 	struct hwtstamp_config *config;
2289 
2290 	if (pf->ptp.state != ICE_PTP_READY)
2291 		return -EIO;
2292 
2293 	config = &pf->ptp.tstamp_config;
2294 
2295 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2296 		-EFAULT : 0;
2297 }
2298 
2299 /**
2300  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2301  * @pf: Board private structure
2302  * @config: hwtstamp settings requested or saved
2303  */
2304 static int
2305 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2306 {
2307 	switch (config->tx_type) {
2308 	case HWTSTAMP_TX_OFF:
2309 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2310 		break;
2311 	case HWTSTAMP_TX_ON:
2312 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2313 		break;
2314 	default:
2315 		return -ERANGE;
2316 	}
2317 
2318 	switch (config->rx_filter) {
2319 	case HWTSTAMP_FILTER_NONE:
2320 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2321 		break;
2322 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2323 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2324 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2325 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2326 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2327 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2328 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2329 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2330 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2331 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2332 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2333 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2334 	case HWTSTAMP_FILTER_NTP_ALL:
2335 	case HWTSTAMP_FILTER_ALL:
2336 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2337 		break;
2338 	default:
2339 		return -ERANGE;
2340 	}
2341 
2342 	/* Immediately update the device timestamping mode */
2343 	ice_ptp_restore_timestamp_mode(pf);
2344 
2345 	return 0;
2346 }
2347 
2348 /**
2349  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2350  * @pf: Board private structure
2351  * @ifr: ioctl data
2352  *
2353  * Get the user config and store it
2354  */
2355 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2356 {
2357 	struct hwtstamp_config config;
2358 	int err;
2359 
2360 	if (pf->ptp.state != ICE_PTP_READY)
2361 		return -EAGAIN;
2362 
2363 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2364 		return -EFAULT;
2365 
2366 	err = ice_ptp_set_timestamp_mode(pf, &config);
2367 	if (err)
2368 		return err;
2369 
2370 	/* Return the actual configuration set */
2371 	config = pf->ptp.tstamp_config;
2372 
2373 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2374 		-EFAULT : 0;
2375 }
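
/*
 * Usage sketch (not part of the driver): applications reach
 * ice_ptp_set_ts_config() through the SIOCSHWTSTAMP ioctl on the netdev,
 * assuming the usual socket and ioctl headers, with "eth0" and sock used
 * here as an illustrative interface name and open socket descriptor:
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any PTP-specific rx_filter is widened to HWTSTAMP_FILTER_ALL
 * above, and the resulting configuration is copied back to user space.
 */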
2376 
2377 /**
2378  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2379  * @rx_desc: Receive descriptor
2380  * @pkt_ctx: Packet context to get the cached time
2381  *
2382  * The driver receives a notification in the receive descriptor with the timestamp.
2383  */
2384 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2385 			const struct ice_pkt_ctx *pkt_ctx)
2386 {
2387 	u64 ts_ns, cached_time;
2388 	u32 ts_high;
2389 
2390 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2391 		return 0;
2392 
2393 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2394 
2395 	/* Do not report a timestamp if we don't have a cached PHC time */
2396 	if (!cached_time)
2397 		return 0;
2398 
2399 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2400 	 * PHC value, rather than accessing the PF. This also allows us to
2401 	 * simply pass the upper 32bits of nanoseconds directly. Calling
2402 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2403 	 * bits itself.
2404 	 */
2405 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2406 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2407 
2408 	return ts_ns;
2409 }
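
/*
 * Usage sketch (not part of the driver): the Rx hot path is expected to
 * consume the value returned above roughly as follows (simplified, variable
 * names illustrative):
 *
 *	u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, pkt_ctx);
 *
 *	if (ts_ns)
 *		skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
 *
 * A zero return means the descriptor carried no valid timestamp or no
 * cached PHC time was available, so no stamp is reported.
 */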
2410 
2411 /**
2412  * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2413  * @pf: pointer to the PF structure
2414  * @info: PTP clock info structure
2415  *
2416  * Disable the OS access to the SMA pins. Called to clear out the OS
2417  * indications of pin support when we fail to set up the E810-T SMA control
2418  * register.
2419  */
2420 static void
2421 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2422 {
2423 	struct device *dev = ice_pf_to_dev(pf);
2424 
2425 	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2426 
2427 	info->enable = NULL;
2428 	info->verify = NULL;
2429 	info->n_pins = 0;
2430 	info->n_ext_ts = 0;
2431 	info->n_per_out = 0;
2432 }
2433 
2434 /**
2435  * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2436  * @pf: pointer to the PF structure
2437  * @info: PTP clock info structure
2438  *
2439  * Finish setting up the SMA pins by allocating pin_config, and setting it up
2440  * according to the current status of the SMA. On failure, disable all of the
2441  * extended SMA pin support.
2442  */
2443 static void
2444 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2445 {
2446 	struct device *dev = ice_pf_to_dev(pf);
2447 	int err;
2448 
2449 	/* Allocate memory for kernel pins interface */
2450 	info->pin_config = devm_kcalloc(dev, info->n_pins,
2451 					sizeof(*info->pin_config), GFP_KERNEL);
2452 	if (!info->pin_config) {
2453 		ice_ptp_disable_sma_pins_e810t(pf, info);
2454 		return;
2455 	}
2456 
2457 	/* Read current SMA status */
2458 	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2459 	if (err)
2460 		ice_ptp_disable_sma_pins_e810t(pf, info);
2461 }
2462 
2463 /**
2464  * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2465  * @pf: pointer to the PF instance
2466  * @info: PTP clock capabilities
2467  */
2468 static void
2469 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2470 {
2471 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2472 		info->n_ext_ts = N_EXT_TS_E810;
2473 		info->n_per_out = N_PER_OUT_E810T;
2474 		info->n_pins = NUM_PTP_PINS_E810T;
2475 		info->verify = ice_verify_pin_e810t;
2476 
2477 		/* Complete setup of the SMA pins */
2478 		ice_ptp_setup_sma_pins_e810t(pf, info);
2479 	} else if (ice_is_e810t(&pf->hw)) {
2480 		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
2481 		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
2482 	} else {
2483 		info->n_per_out = N_PER_OUT_E810;
2484 		info->n_ext_ts = N_EXT_TS_E810;
2485 	}
2486 }
2487 
2488 /**
2489  * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
2490  * @pf: pointer to the PF instance
2491  * @info: PTP clock capabilities
2492  */
2493 static void
2494 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2495 {
2496 	info->pps = 1;
2497 	info->n_per_out = 0;
2498 	info->n_ext_ts = 1;
2499 }
2500 
2501 /**
2502  * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
2503  * @pf: Board private structure
2504  * @info: PTP info to fill
2505  *
2506  * Assign functions to the PTP capabilities structure for E82x devices.
2507  * Functions which operate across all device families should be set directly
2508  * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
2509  * devices.
2510  */
2511 static void
2512 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
2513 {
2514 #ifdef CONFIG_ICE_HWTS
2515 	if (boot_cpu_has(X86_FEATURE_ART) &&
2516 	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2517 		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
2518 #endif /* CONFIG_ICE_HWTS */
2519 }
2520 
2521 /**
2522  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2523  * @pf: Board private structure
2524  * @info: PTP info to fill
2525  *
2526  * Assign functions to the PTP capabilities structure for E810 devices.
2527  * Functions which operate across all device families should be set directly
2528  * in ice_ptp_set_caps. Only add functions here which are distinct for e810
2529  * devices.
2530  */
2531 static void
2532 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2533 {
2534 	info->enable = ice_ptp_gpio_enable_e810;
2535 	ice_ptp_setup_pins_e810(pf, info);
2536 }
2537 
2538 /**
2539  * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
2540  * @pf: Board private structure
2541  * @info: PTP info to fill
2542  *
2543  * Assign functions to the PTP capabilities structure for E823 devices.
2544  * Functions which operate across all device families should be set directly
2545  * in ice_ptp_set_caps. Only add functions here which are distinct for e823
2546  * devices.
2547  */
2548 static void
2549 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2550 {
2551 	ice_ptp_set_funcs_e82x(pf, info);
2552 
2553 	info->enable = ice_ptp_gpio_enable_e823;
2554 	ice_ptp_setup_pins_e823(pf, info);
2555 }
2556 
2557 /**
2558  * ice_ptp_set_caps - Set PTP capabilities
2559  * @pf: Board private structure
2560  */
2561 static void ice_ptp_set_caps(struct ice_pf *pf)
2562 {
2563 	struct ptp_clock_info *info = &pf->ptp.info;
2564 	struct device *dev = ice_pf_to_dev(pf);
2565 
2566 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2567 		 dev_driver_string(dev), dev_name(dev));
2568 	info->owner = THIS_MODULE;
2569 	info->max_adj = 100000000;
2570 	info->adjtime = ice_ptp_adjtime;
2571 	info->adjfine = ice_ptp_adjfine;
2572 	info->gettimex64 = ice_ptp_gettimex64;
2573 	info->settime64 = ice_ptp_settime64;
2574 
2575 	if (ice_is_e810(&pf->hw))
2576 		ice_ptp_set_funcs_e810(pf, info);
2577 	else if (ice_is_e823(&pf->hw))
2578 		ice_ptp_set_funcs_e823(pf, info);
2579 	else
2580 		ice_ptp_set_funcs_e82x(pf, info);
2581 }
2582 
2583 /**
2584  * ice_ptp_create_clock - Create PTP clock device for userspace
2585  * @pf: Board private structure
2586  *
2587  * This function creates a new PTP clock device if one does not already
2588  * exist. It returns an error if it cannot create one, and success if a
2589  * device already exists. It should be used by ice_ptp_init to create the
2590  * clock initially, and prevents global resets from creating new clock devices.
2591  */
2592 static long ice_ptp_create_clock(struct ice_pf *pf)
2593 {
2594 	struct ptp_clock_info *info;
2595 	struct device *dev;
2596 
2597 	/* No need to create a clock device if we already have one */
2598 	if (pf->ptp.clock)
2599 		return 0;
2600 
2601 	ice_ptp_set_caps(pf);
2602 
2603 	info = &pf->ptp.info;
2604 	dev = ice_pf_to_dev(pf);
2605 
2606 	/* Attempt to register the clock before enabling the hardware. */
2607 	pf->ptp.clock = ptp_clock_register(info, dev);
2608 	if (IS_ERR(pf->ptp.clock)) {
2609 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2610 		return PTR_ERR(pf->ptp.clock);
2611 	}
2612 
2613 	return 0;
2614 }
2615 
2616 /**
2617  * ice_ptp_request_ts - Request an available Tx timestamp index
2618  * @tx: the PTP Tx timestamp tracker to request from
2619  * @skb: the SKB to associate with this timestamp request
2620  */
2621 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2622 {
2623 	unsigned long flags;
2624 	u8 idx;
2625 
2626 	spin_lock_irqsave(&tx->lock, flags);
2627 
2628 	/* Check that this tracker is accepting new timestamp requests */
2629 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2630 		spin_unlock_irqrestore(&tx->lock, flags);
2631 		return -1;
2632 	}
2633 
2634 	/* Find and set the first available index */
2635 	idx = find_next_zero_bit(tx->in_use, tx->len,
2636 				 tx->last_ll_ts_idx_read + 1);
2637 	if (idx == tx->len)
2638 		idx = find_first_zero_bit(tx->in_use, tx->len);
2639 
2640 	if (idx < tx->len) {
2641 		/* We got a valid index that no other thread could have set. Store
2642 		 * a reference to the skb and the start time to allow discarding old
2643 		 * requests.
2644 		 */
2645 		set_bit(idx, tx->in_use);
2646 		clear_bit(idx, tx->stale);
2647 		tx->tstamps[idx].start = jiffies;
2648 		tx->tstamps[idx].skb = skb_get(skb);
2649 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2650 		ice_trace(tx_tstamp_request, skb, idx);
2651 	}
2652 
2653 	spin_unlock_irqrestore(&tx->lock, flags);
2654 
2655 	/* return the appropriate PHY timestamp register index, -1 if no
2656 	 * indexes were available.
2657 	 */
2658 	if (idx >= tx->len)
2659 		return -1;
2660 	else
2661 		return idx + tx->offset;
2662 }
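
/*
 * Usage sketch (not part of the driver): the Tx hot path is expected to
 * call this just before handing a timestamp-requesting skb to hardware
 * (simplified, names illustrative):
 *
 *	s8 idx = ice_ptp_request_ts(&pf->ptp.port.tx, skb);
 *
 *	if (idx >= 0)
 *		program idx into the Tx descriptor timestamp request field;
 *	else
 *		send the frame without a hardware timestamp;
 *
 * The completed timestamp is later matched back to the skb when
 * ice_ptp_process_ts() handles the Tx timestamp interrupt.
 */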
2663 
2664 /**
2665  * ice_ptp_process_ts - Process the PTP Tx timestamps
2666  * @pf: Board private structure
2667  *
2668  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2669  * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2670  */
2671 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2672 {
2673 	switch (pf->ptp.tx_interrupt_mode) {
2674 	case ICE_PTP_TX_INTERRUPT_NONE:
2675 		/* This device has the clock owner handle timestamps for it */
2676 		return ICE_TX_TSTAMP_WORK_DONE;
2677 	case ICE_PTP_TX_INTERRUPT_SELF:
2678 		/* This device handles its own timestamps */
2679 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2680 	case ICE_PTP_TX_INTERRUPT_ALL:
2681 		/* This device handles timestamps for all ports */
2682 		return ice_ptp_tx_tstamp_owner(pf);
2683 	default:
2684 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2685 			  pf->ptp.tx_interrupt_mode);
2686 		return ICE_TX_TSTAMP_WORK_DONE;
2687 	}
2688 }
2689 
2690 /**
2691  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2692  * @pf: Board private structure
2693  *
2694  * The device PHY issues Tx timestamp interrupts to the driver for processing
2695  * timestamp data from the PHY. It will not interrupt again until all
2696  * current timestamp data is read. In rare circumstances, it is possible that
2697  * the driver fails to read all outstanding data.
2698  *
2699  * To avoid getting permanently stuck, periodically check if the PHY has
2700  * outstanding timestamp data. If so, trigger an interrupt from software to
2701  * process this data.
2702  */
2703 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2704 {
2705 	struct device *dev = ice_pf_to_dev(pf);
2706 	struct ice_hw *hw = &pf->hw;
2707 	bool trigger_oicr = false;
2708 	unsigned int i;
2709 
2710 	if (ice_is_e810(hw))
2711 		return;
2712 
2713 	if (!ice_pf_src_tmr_owned(pf))
2714 		return;
2715 
2716 	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
2717 		u64 tstamp_ready;
2718 		int err;
2719 
2720 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2721 		if (!err && tstamp_ready) {
2722 			trigger_oicr = true;
2723 			break;
2724 		}
2725 	}
2726 
2727 	if (trigger_oicr) {
2728 		/* Trigger a software interrupt, to ensure this data
2729 		 * gets processed.
2730 		 */
2731 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2732 
2733 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2734 		ice_flush(hw);
2735 	}
2736 }
2737 
2738 static void ice_ptp_periodic_work(struct kthread_work *work)
2739 {
2740 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2741 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2742 	int err;
2743 
2744 	if (pf->ptp.state != ICE_PTP_READY)
2745 		return;
2746 
2747 	err = ice_ptp_update_cached_phctime(pf);
2748 
2749 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2750 
2751 	/* Run twice a second or reschedule if phc update failed */
2752 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2753 				   msecs_to_jiffies(err ? 10 : 500));
2754 }
2755 
2756 /**
2757  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2758  * @pf: Board private structure
2759  * @reset_type: the reset type being performed
2760  */
2761 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2762 {
2763 	struct ice_ptp *ptp = &pf->ptp;
2764 	u8 src_tmr;
2765 
2766 	if (ptp->state != ICE_PTP_READY)
2767 		return;
2768 
2769 	ptp->state = ICE_PTP_RESETTING;
2770 
2771 	/* Disable timestamping for both Tx and Rx */
2772 	ice_ptp_disable_timestamp_mode(pf);
2773 
2774 	kthread_cancel_delayed_work_sync(&ptp->work);
2775 
2776 	if (reset_type == ICE_RESET_PFR)
2777 		return;
2778 
2779 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2780 
2781 	/* Disable periodic outputs */
2782 	ice_ptp_disable_all_clkout(pf);
2783 
2784 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2785 
2786 	/* Disable source clock */
2787 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2788 
2789 	/* Acquire PHC and system timer to restore after reset */
2790 	ptp->reset_time = ktime_get_real_ns();
2791 }
2792 
2793 /**
2794  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2795  * @pf: Board private structure
2796  *
2797  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2798  * PTP clock owner instance should perform.
2799  */
2800 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2801 {
2802 	struct ice_ptp *ptp = &pf->ptp;
2803 	struct ice_hw *hw = &pf->hw;
2804 	struct timespec64 ts;
2805 	u64 time_diff;
2806 	int err;
2807 
2808 	err = ice_ptp_init_phc(hw);
2809 	if (err)
2810 		return err;
2811 
2812 	/* Acquire the global hardware lock */
2813 	if (!ice_ptp_lock(hw)) {
2814 		err = -EBUSY;
2815 		return err;
2816 	}
2817 
2818 	/* Write the increment time value to PHY and LAN */
2819 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2820 	if (err) {
2821 		ice_ptp_unlock(hw);
2822 		return err;
2823 	}
2824 
2825 	/* Write the initial Time value to PHY and LAN using the cached PHC
2826 	 * time before the reset and time difference between stopping and
2827 	 * starting the clock.
2828 	 */
2829 	if (ptp->cached_phc_time) {
2830 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2831 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2832 	} else {
2833 		ts = ktime_to_timespec64(ktime_get_real());
2834 	}
2835 	err = ice_ptp_write_init(pf, &ts);
2836 	if (err) {
2837 		ice_ptp_unlock(hw);
2838 		return err;
2839 	}
2840 
2841 	/* Release the global hardware lock */
2842 	ice_ptp_unlock(hw);
2843 
2844 	/* Flush software tracking of any outstanding timestamps since we're
2845 	 * about to flush the PHY timestamp block.
2846 	 */
2847 	ice_ptp_flush_all_tx_tracker(pf);
2848 
2849 	if (!ice_is_e810(hw)) {
2850 		/* Enable quad interrupts */
2851 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2852 		if (err)
2853 			return err;
2854 
2855 		ice_ptp_restart_all_phy(pf);
2856 	}
2857 
2858 	/* Re-enable all periodic outputs and external timestamp events */
2859 	ice_ptp_enable_all_clkout(pf);
2860 	ice_ptp_enable_all_extts(pf);
2861 
2862 	return 0;
2863 }
2864 
2865 /**
2866  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2867  * @pf: Board private structure
2868  * @reset_type: the reset type being performed
2869  */
2870 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2871 {
2872 	struct ice_ptp *ptp = &pf->ptp;
2873 	int err;
2874 
2875 	if (ptp->state == ICE_PTP_READY) {
2876 		ice_ptp_prepare_for_reset(pf, reset_type);
2877 	} else if (ptp->state != ICE_PTP_RESETTING) {
2878 		err = -EINVAL;
2879 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2880 		goto err;
2881 	}
2882 
2883 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2884 		err = ice_ptp_rebuild_owner(pf);
2885 		if (err)
2886 			goto err;
2887 	}
2888 
2889 	ptp->state = ICE_PTP_READY;
2890 
2891 	/* Start periodic work going */
2892 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2893 
2894 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2895 	return;
2896 
2897 err:
2898 	ptp->state = ICE_PTP_ERROR;
2899 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2900 }
2901 
2902 static bool ice_is_primary(struct ice_hw *hw)
2903 {
2904 	return ice_is_e825c(hw) && ice_is_dual(hw) ?
2905 		!!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
2906 }
2907 
2908 static int ice_ptp_setup_adapter(struct ice_pf *pf)
2909 {
2910 	if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
2911 		return -EPERM;
2912 
2913 	pf->adapter->ctrl_pf = pf;
2914 
2915 	return 0;
2916 }
2917 
2918 static int ice_ptp_setup_pf(struct ice_pf *pf)
2919 {
2920 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
2921 	struct ice_ptp *ptp = &pf->ptp;
2922 
2923 	if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
2924 		return -ENODEV;
2925 
2926 	INIT_LIST_HEAD(&ptp->port.list_node);
2927 	mutex_lock(&pf->adapter->ports.lock);
2928 
2929 	list_add(&ptp->port.list_node,
2930 		 &pf->adapter->ports.ports);
2931 	mutex_unlock(&pf->adapter->ports.lock);
2932 
2933 	return 0;
2934 }
2935 
2936 static void ice_ptp_cleanup_pf(struct ice_pf *pf)
2937 {
2938 	struct ice_ptp *ptp = &pf->ptp;
2939 
2940 	if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
2941 		mutex_lock(&pf->adapter->ports.lock);
2942 		list_del(&ptp->port.list_node);
2943 		mutex_unlock(&pf->adapter->ports.lock);
2944 	}
2945 }
2946 /**
2947  * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
2948  * @aux_dev: auxiliary device to get the auxiliary PF for
2949  */
2950 static struct ice_pf *
2951 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
2952 {
2953 	struct ice_ptp_port *aux_port;
2954 	struct ice_ptp *aux_ptp;
2955 
2956 	aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
2957 	aux_ptp = container_of(aux_port, struct ice_ptp, port);
2958 
2959 	return container_of(aux_ptp, struct ice_pf, ptp);
2960 }
2961 
2962 /**
2963  * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
2964  * @aux_dev: auxiliary device to get the PF for
2965  */
2966 static struct ice_pf *
2967 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
2968 {
2969 	struct ice_ptp_port_owner *ports_owner;
2970 	const struct auxiliary_driver *aux_drv;
2971 	struct ice_ptp *owner_ptp;
2972 
2973 	if (!aux_dev->dev.driver)
2974 		return NULL;
2975 
2976 	aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
2977 	ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
2978 				   aux_driver);
2979 	owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
2980 	return container_of(owner_ptp, struct ice_pf, ptp);
2981 }
2982 
2983 /**
2984  * ice_ptp_auxbus_probe - Probe auxiliary devices
2985  * @aux_dev: PF's auxiliary device
2986  * @id: Auxiliary device ID
2987  */
2988 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
2989 				const struct auxiliary_device_id *id)
2990 {
2991 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2992 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2993 
2994 	if (WARN_ON(!owner_pf))
2995 		return -ENODEV;
2996 
2997 	INIT_LIST_HEAD(&aux_pf->ptp.port.list_node);
2998 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2999 	list_add(&aux_pf->ptp.port.list_node,
3000 		 &owner_pf->ptp.ports_owner.ports);
3001 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
3002 
3003 	return 0;
3004 }
3005 
3006 /**
3007  * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
3008  * @aux_dev: PF's auxiliary device
3009  */
3010 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
3011 {
3012 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
3013 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
3014 
3015 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
3016 	list_del(&aux_pf->ptp.port.list_node);
3017 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
3018 }
3019 
3020 /**
3021  * ice_ptp_auxbus_shutdown
3022  * @aux_dev: PF's auxiliary device
3023  */
3024 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
3025 {
3026 	/* Doing nothing here, but handle to auxbus driver must be satisfied */
3027 }
3028 
3029 /**
3030  * ice_ptp_auxbus_suspend
3031  * @aux_dev: PF's auxiliary device
3032  * @state: power management state indicator
3033  */
3034 static int
3035 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
3036 {
3037 	/* Doing nothing here, but handle to auxbus driver must be satisfied */
3038 	return 0;
3039 }
3040 
3041 /**
3042  * ice_ptp_auxbus_resume
3043  * @aux_dev: PF's auxiliary device
3044  */
3045 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
3046 {
3047 	/* Doing nothing here, but handle to auxbus driver must be satisfied */
3048 	return 0;
3049 }
3050 
3051 /**
3052  * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
3053  * @pf: Board private structure
3054  * @name: auxiliary bus driver name
3055  */
3056 static struct auxiliary_device_id *
3057 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
3058 {
3059 	struct auxiliary_device_id *ids;
3060 
3061 	/* Second id left empty to terminate the array */
3062 	ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
3063 			   sizeof(struct auxiliary_device_id), GFP_KERNEL);
3064 	if (!ids)
3065 		return NULL;
3066 
3067 	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
3068 
3069 	return ids;
3070 }
3071 
3072 /**
3073  * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
3074  * @pf: Board private structure
3075  */
3076 static int __always_unused ice_ptp_register_auxbus_driver(struct ice_pf *pf)
3077 {
3078 	struct auxiliary_driver *aux_driver;
3079 	struct ice_ptp *ptp;
3080 	struct device *dev;
3081 	char *name;
3082 	int err;
3083 
3084 	ptp = &pf->ptp;
3085 	dev = ice_pf_to_dev(pf);
3086 	aux_driver = &ptp->ports_owner.aux_driver;
3087 	INIT_LIST_HEAD(&ptp->ports_owner.ports);
3088 	mutex_init(&ptp->ports_owner.lock);
3089 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3090 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3091 			      ice_get_ptp_src_clock_index(&pf->hw));
3092 	if (!name)
3093 		return -ENOMEM;
3094 
3095 	aux_driver->name = name;
3096 	aux_driver->shutdown = ice_ptp_auxbus_shutdown;
3097 	aux_driver->suspend = ice_ptp_auxbus_suspend;
3098 	aux_driver->remove = ice_ptp_auxbus_remove;
3099 	aux_driver->resume = ice_ptp_auxbus_resume;
3100 	aux_driver->probe = ice_ptp_auxbus_probe;
3101 	aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
3102 	if (!aux_driver->id_table)
3103 		return -ENOMEM;
3104 
3105 	err = auxiliary_driver_register(aux_driver);
3106 	if (err) {
3107 		devm_kfree(dev, aux_driver->id_table);
3108 		dev_err(dev, "Failed registering aux_driver, name <%s>\n",
3109 			name);
3110 	}
3111 
3112 	return err;
3113 }
3114 
3115 /**
3116  * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
3117  * @pf: Board private structure
3118  */
3119 static void __always_unused ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
3120 {
3121 	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
3122 
3123 	auxiliary_driver_unregister(aux_driver);
3124 	devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
3125 
3126 	mutex_destroy(&pf->ptp.ports_owner.lock);
3127 }
3128 
3129 /**
3130  * ice_ptp_clock_index - Get the PTP clock index for this device
3131  * @pf: Board private structure
3132  *
3133  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
3134  * is associated.
3135  */
3136 int ice_ptp_clock_index(struct ice_pf *pf)
3137 {
3138 	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
3139 	struct ptp_clock *clock;
3140 
3141 	if (!ctrl_ptp)
3142 		return -1;
3143 	clock = ctrl_ptp->clock;
3144 
3145 	return clock ? ptp_clock_index(clock) : -1;
3146 }
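
/*
 * Usage sketch (not part of the driver): this index is what the driver's
 * ethtool get_ts_info callback would report as the PHC index (simplified,
 * assuming an info structure with a phc_index field):
 *
 *	info->phc_index = ice_ptp_clock_index(pf);
 *
 * User space can then open the matching /dev/ptp<index> character device,
 * e.g. as reported by "ethtool -T <ifname>".
 */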
3147 
3148 /**
3149  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
3150  * @pf: Board private structure
3151  *
3152  * Setup and initialize a PTP clock device that represents the device hardware
3153  * clock. Save the clock index for other functions connected to the same
3154  * hardware resource.
3155  */
3156 static int ice_ptp_init_owner(struct ice_pf *pf)
3157 {
3158 	struct ice_hw *hw = &pf->hw;
3159 	struct timespec64 ts;
3160 	int err;
3161 
3162 	err = ice_ptp_init_phc(hw);
3163 	if (err) {
3164 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
3165 			err);
3166 		return err;
3167 	}
3168 
3169 	/* Acquire the global hardware lock */
3170 	if (!ice_ptp_lock(hw)) {
3171 		err = -EBUSY;
3172 		goto err_exit;
3173 	}
3174 
3175 	/* Write the increment time value to PHY and LAN */
3176 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
3177 	if (err) {
3178 		ice_ptp_unlock(hw);
3179 		goto err_exit;
3180 	}
3181 
3182 	ts = ktime_to_timespec64(ktime_get_real());
3183 	/* Write the initial Time value to PHY and LAN */
3184 	err = ice_ptp_write_init(pf, &ts);
3185 	if (err) {
3186 		ice_ptp_unlock(hw);
3187 		goto err_exit;
3188 	}
3189 
3190 	/* Release the global hardware lock */
3191 	ice_ptp_unlock(hw);
3192 
3193 	/* Configure PHY interrupt settings */
3194 	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3195 	if (err)
3196 		goto err_exit;
3197 
3198 	/* Ensure we have a clock device */
3199 	err = ice_ptp_create_clock(pf);
3200 	if (err)
3201 		goto err_clk;
3202 
3203 	return 0;
3204 err_clk:
3205 	pf->ptp.clock = NULL;
3206 err_exit:
3207 	return err;
3208 }
3209 
3210 /**
3211  * ice_ptp_init_work - Initialize PTP work threads
3212  * @pf: Board private structure
3213  * @ptp: PF PTP structure
3214  */
3215 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3216 {
3217 	struct kthread_worker *kworker;
3218 
3219 	/* Initialize work functions */
3220 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3221 
3222 	/* Allocate a kworker for handling work required for the ports
3223 	 * connected to the PTP hardware clock.
3224 	 */
3225 	kworker = kthread_create_worker(0, "ice-ptp-%s",
3226 					dev_name(ice_pf_to_dev(pf)));
3227 	if (IS_ERR(kworker))
3228 		return PTR_ERR(kworker);
3229 
3230 	ptp->kworker = kworker;
3231 
3232 	/* Start periodic work going */
3233 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3234 
3235 	return 0;
3236 }
3237 
3238 /**
3239  * ice_ptp_init_port - Initialize PTP port structure
3240  * @pf: Board private structure
3241  * @ptp_port: PTP port structure
3242  */
3243 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3244 {
3245 	struct ice_hw *hw = &pf->hw;
3246 
3247 	mutex_init(&ptp_port->ps_lock);
3248 
3249 	switch (ice_get_phy_model(hw)) {
3250 	case ICE_PHY_ETH56G:
3251 		return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
3252 					      ptp_port->port_num);
3253 	case ICE_PHY_E810:
3254 		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3255 	case ICE_PHY_E82X:
3256 		kthread_init_delayed_work(&ptp_port->ov_work,
3257 					  ice_ptp_wait_for_offsets);
3258 
3259 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3260 					    ptp_port->port_num);
3261 	default:
3262 		return -ENODEV;
3263 	}
3264 }
3265 
3266 /**
3267  * ice_ptp_release_auxbus_device
3268  * @dev: device that utilizes the auxbus
3269  */
3270 static void ice_ptp_release_auxbus_device(struct device *dev)
3271 {
3272 	/* Doing nothing here, but handle to auxbus device must be satisfied */
3273 }
3274 
3275 /**
3276  * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
3277  * @pf: Board private structure
3278  */
3279 static __always_unused int ice_ptp_create_auxbus_device(struct ice_pf *pf)
3280 {
3281 	struct auxiliary_device *aux_dev;
3282 	struct ice_ptp *ptp;
3283 	struct device *dev;
3284 	char *name;
3285 	int err;
3286 	u32 id;
3287 
3288 	ptp = &pf->ptp;
3289 	id = ptp->port.port_num;
3290 	dev = ice_pf_to_dev(pf);
3291 
3292 	aux_dev = &ptp->port.aux_dev;
3293 
3294 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3295 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3296 			      ice_get_ptp_src_clock_index(&pf->hw));
3297 	if (!name)
3298 		return -ENOMEM;
3299 
3300 	aux_dev->name = name;
3301 	aux_dev->id = id;
3302 	aux_dev->dev.release = ice_ptp_release_auxbus_device;
3303 	aux_dev->dev.parent = dev;
3304 
3305 	err = auxiliary_device_init(aux_dev);
3306 	if (err)
3307 		goto aux_err;
3308 
3309 	err = auxiliary_device_add(aux_dev);
3310 	if (err) {
3311 		auxiliary_device_uninit(aux_dev);
3312 		goto aux_err;
3313 	}
3314 
3315 	return 0;
3316 aux_err:
3317 	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
3318 	devm_kfree(dev, name);
3319 	return err;
3320 }
3321 
3322 /**
3323  * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
3324  * @pf: Board private structure
3325  */
3326 static __always_unused void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
3327 {
3328 	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
3329 
3330 	auxiliary_device_delete(aux_dev);
3331 	auxiliary_device_uninit(aux_dev);
3332 
3333 	memset(aux_dev, 0, sizeof(*aux_dev));
3334 }
3335 
3336 /**
3337  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3338  * @pf: Board private structure
3339  *
3340  * Initialize the Tx timestamp interrupt mode for this device. For most device
3341  * types, each PF processes the interrupt and manages its own timestamps. For
3342  * E822-based devices, only the clock owner processes the timestamps. Other
3343  * PFs disable the interrupt and do not process their own timestamps.
3344  */
3345 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3346 {
3347 	switch (ice_get_phy_model(&pf->hw)) {
3348 	case ICE_PHY_E82X:
3349 		/* E822 based PHY has the clock owner process the interrupt
3350 		 * for all ports.
3351 		 */
3352 		if (ice_pf_src_tmr_owned(pf))
3353 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3354 		else
3355 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3356 		break;
3357 	default:
3358 		/* other PHY types handle their own Tx interrupt */
3359 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3360 	}
3361 }
3362 
3363 /**
3364  * ice_ptp_init - Initialize PTP hardware clock support
3365  * @pf: Board private structure
3366  *
3367  * Set up the device for interacting with the PTP hardware clock for all
3368  * functions, both the function that owns the clock hardware, and the
3369  * functions connected to the clock hardware.
3370  *
3371  * The clock owner will allocate and register a ptp_clock with the
3372  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3373  * items used for asynchronous work such as Tx timestamps and periodic work.
3374  */
3375 void ice_ptp_init(struct ice_pf *pf)
3376 {
3377 	struct ice_ptp *ptp = &pf->ptp;
3378 	struct ice_hw *hw = &pf->hw;
3379 	int lane_num, err;
3380 
3381 	ptp->state = ICE_PTP_INITIALIZING;
3382 
3383 	lane_num = ice_get_phy_lane_number(hw);
3384 	if (lane_num < 0) {
3385 		err = lane_num;
3386 		goto err_exit;
3387 	}
3388 
3389 	ptp->port.port_num = (u8)lane_num;
3390 	ice_ptp_init_hw(hw);
3391 
3392 	ice_ptp_init_tx_interrupt_mode(pf);
3393 
3394 	/* If this function owns the clock hardware, it must allocate and
3395 	 * configure the PTP clock device to represent it.
3396 	 */
3397 	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
3398 		err = ice_ptp_setup_adapter(pf);
3399 		if (err)
3400 			goto err_exit;
3401 		err = ice_ptp_init_owner(pf);
3402 		if (err)
3403 			goto err_exit;
3404 	}
3405 
3406 	err = ice_ptp_setup_pf(pf);
3407 	if (err)
3408 		goto err_exit;
3409 
3410 	err = ice_ptp_init_port(pf, &ptp->port);
3411 	if (err)
3412 		goto err_exit;
3413 
3414 	/* Start the PHY timestamping block */
3415 	ice_ptp_reset_phy_timestamping(pf);
3416 
3417 	/* Configure initial Tx interrupt settings */
3418 	ice_ptp_cfg_tx_interrupt(pf);
3419 
3420 	ptp->state = ICE_PTP_READY;
3421 
3422 	err = ice_ptp_init_work(pf, ptp);
3423 	if (err)
3424 		goto err_exit;
3425 
3426 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3427 	return;
3428 
3429 err_exit:
3430 	/* If we registered a PTP clock, release it */
3431 	if (pf->ptp.clock) {
3432 		ptp_clock_unregister(ptp->clock);
3433 		pf->ptp.clock = NULL;
3434 	}
3435 	ptp->state = ICE_PTP_ERROR;
3436 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3437 }
3438 
3439 /**
3440  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3441  * @pf: Board private structure
3442  *
3443  * This function handles the cleanup work required from the initialization by
3444  * clearing out the important information and unregistering the clock
3445  */
3446 void ice_ptp_release(struct ice_pf *pf)
3447 {
3448 	if (pf->ptp.state != ICE_PTP_READY)
3449 		return;
3450 
3451 	pf->ptp.state = ICE_PTP_UNINIT;
3452 
3453 	/* Disable timestamping for both Tx and Rx */
3454 	ice_ptp_disable_timestamp_mode(pf);
3455 
3456 	ice_ptp_cleanup_pf(pf);
3457 
3458 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3459 
3460 	ice_ptp_disable_all_extts(pf);
3461 
3462 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3463 
3464 	ice_ptp_port_phy_stop(&pf->ptp.port);
3465 	mutex_destroy(&pf->ptp.port.ps_lock);
3466 	if (pf->ptp.kworker) {
3467 		kthread_destroy_worker(pf->ptp.kworker);
3468 		pf->ptp.kworker = NULL;
3469 	}
3470 
3471 	if (!pf->ptp.clock)
3472 		return;
3473 
3474 	/* Disable periodic outputs */
3475 	ice_ptp_disable_all_clkout(pf);
3476 
3477 	ptp_clock_unregister(pf->ptp.clock);
3478 	pf->ptp.clock = NULL;
3479 
3480 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3481 }
3482