// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_ptp.h"
#include "qed_reg_addr.h"

/* 16 nanosecond time quanta to wait before making a Drift adjustment */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT	0
/* Nanoseconds to add/subtract when making a Drift adjustment */
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT		28
/* Add/subtract the Adjustment_Value when making a Drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT		31
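/* Taken together, the three shifts above describe the layout of the drift
 * counter configuration word built in qed_ptp_hw_adjfreq(): bits [27:0] hold
 * the drift period in 16 ns quanta, bits [30:28] hold the 0-7 ns adjustment
 * value, and bit [31] holds the add/subtract direction. This summary is
 * inferred from the shift values and range checks below, not from hardware
 * documentation.
 */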
#define QED_TIMESTAMP_MASK			BIT(16)
/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
#define QED_PTP_UCAST_PARAM_MASK              0x70F

static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
	switch (MFW_PORT(p_hwfn)) {
	case 0:
		return QED_RESC_LOCK_PTP_PORT0;
	case 1:
		return QED_RESC_LOCK_PTP_PORT1;
	case 2:
		return QED_RESC_LOCK_PTP_PORT2;
	case 3:
		return QED_RESC_LOCK_PTP_PORT3;
	default:
		return QED_RESC_LOCK_RESC_INVALID;
	}
}

static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		/* MFW doesn't support resource locking, first PF on the port
		 * has lock ownership.
		 */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
			return 0;

		DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
		return -EBUSY;
	} else if (!params.b_granted) {
		DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
		return -EBUSY;
	}

	return 0;
}

static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(NULL, &params, resource, true);

	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
	if (rc == -EINVAL) {
		/* MFW doesn't support locking, first PF has lock ownership */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
			rc = 0;
		} else {
			DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
			return -EINVAL;
		}
	} else if (rc) {
		DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
	}

	return rc;
}

/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Read Tx timestamp */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
			   "Invalid Tx timestamp, buf_seqid = %08x\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

/* Read Phy Hardware Clock */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 temp = 0;

	temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
	*phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
	*phc_cycles <<= 32;
	*phc_cycles |= temp;

	return 0;
}

/* Filter PTP protocol packets that need to be timestamped */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}
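
	/* Note (inferred from usage above, not from hardware documentation):
	 * a set bit in rule_mask appears to disable the corresponding LLH PTP
	 * classification rule (0x3FFF masks all rules, as used for
	 * QED_PTP_FILTER_NONE), while enable_cfg selects which PTP detection
	 * blocks in the NIG are active.
	 */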

	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
	       QED_PTP_UCAST_PARAM_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
		       QED_PTP_UCAST_PARAM_MASK);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment value in terms of 3 parameters:
 *   Drift period - adjustment happens once in a certain number of nanoseconds.
 *   Drift value - time is adjusted by a certain value, for example by 5 ns.
 *   Drift direction - add or subtract the adjustment value.
 * The routine translates ppb into the adjustment triplet in an optimal manner.
 */
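/* Illustrative example (worked out from the code below): for ppb = 100 the
 * search settles on best_val = 7 and best_period = 4375000, i.e. the clock is
 * nudged by 7 ns once every 4375000 * 16 + 8 = 70000008 ns, which is
 * ~99.99999 ppb - the closest achievable approximation of the requested rate.
 */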
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
	s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 drift_ctr_cfg = 0, drift_state;
	int drift_dir = 1;

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		s64 best_dif = ppb, best_approx_dev = 1;

		/* Adjustment value is up to +/-7ns, find an optimal value in
		 * this range.
		 */
		for (val = 7; val > 0; val--) {
			period = div_s64(val * 1000000000, ppb);
			period -= 8;
			period >>= 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Check both rounding ends for approximate error */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000;
			dif2 = dif + 16 * ppb;

			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;

			/* Determine which end gives better approximation */
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Track best approximation found so far */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		/* This is a special case as it's the only value which wouldn't
		 * fit in an s64 variable. In order to prevent casts, simply
		 * handle it separately.
		 */
		best_val = 4;
		best_period = 0xee6b27f;	/* 4 ns every ~4 s, i.e. ~1 ppb */
	} else {
		best_val = 0;
		best_period = 0xFFFFFFF;
	}

	drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
			(((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
			(((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

	drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
	if (drift_state & 1) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
		       drift_ctr_cfg);
	} else {
		DP_INFO(p_hwfn, "Drift counter is not reset\n");
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	return 0;
}

static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
	/* Resume free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset PTP event detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}

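/* Ops table exported to the upper-layer Ethernet driver (e.g. qede). Note
 * that .enable() acquires the PTT and the per-port PTP resource lock that the
 * other callbacks rely on (p_hwfn->p_ptp_ptt), and .disable() releases them,
 * so callers are expected to invoke .enable() first and .disable() last; this
 * ordering is inferred from the code above rather than stated elsewhere.
 */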
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};