1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 CGX driver
3 *
4 * Copyright (C) 2018 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/acpi.h>
12 #include <linux/module.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/phy.h>
18 #include <linux/of.h>
19 #include <linux/of_mdio.h>
20 #include <linux/of_net.h>
21
22 #include "cgx.h"
23
24 #define DRV_NAME "octeontx2-cgx"
25 #define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
26
27 /**
28 * struct lmac
29 * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
30 * @cmd_lock: Lock to serialize the command interface
31 * @resp: command response
32 * @link_info: link related information
33 * @event_cb: callback for linkchange events
34 * @event_cb_lock: lock for serializing callback with unregister
35 * @cmd_pend: flag set before new command is started
36 * flag cleared after command response is received
37 * @cgx: parent cgx port
38 * @lmac_id: lmac port id
39 * @name: lmac port name
40 */
struct lmac {
	wait_queue_head_t wq_cmd_cmplt;
	struct mutex cmd_lock;
	u64 resp;	/* valid only after cmd_pend is cleared by the IRQ handler */
	struct cgx_link_user_info link_info;
	struct cgx_event_cb event_cb;
	spinlock_t event_cb_lock;
	bool cmd_pend;
	struct cgx *cgx;	/* back-pointer to the owning CGX block */
	u8 lmac_id;
	char *name;	/* IRQ name; allocated in cgx_lmac_init(), freed in cgx_lmac_exit() */
};
53
/* Per-device CGX block state; one instance per probed PCI function,
 * linked on the global cgx_list.
 */
struct cgx {
	void __iomem *reg_base;			/* BAR0 CSR window (pcim_iomap) */
	struct pci_dev *pdev;
	u8 cgx_id;				/* derived from BAR address in cgx_probe() */
	u8 lmac_count;				/* LMACs present; read from CGXX_CMRX_RX_LMACS */
	struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
	struct work_struct cgx_cmd_work;	/* runs cgx_lmac_linkup_work() */
	struct workqueue_struct *cgx_cmd_workq;
	struct list_head cgx_list;		/* node on the global cgx_list */
};
64
/* All probed CGX devices; looked up by id in cgx_get_pdata() */
static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format(Mbps).
 * Populated once in cgx_link_usertable_init() during probe.
 */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string.
 * Populated once in cgx_link_usertable_init() during probe.
 */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);
83
/* Write @val to a per-LMAC CSR; each LMAC owns a (1 << 18) byte
 * slice of the CGX register space.
 */
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	void __iomem *addr = cgx->reg_base + (lmac << 18) + offset;

	writeq(val, addr);
}
88
/* Read a per-LMAC CSR; mirrors the address math in cgx_write() */
static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	void __iomem *addr = cgx->reg_base + (lmac << 18) + offset;

	return readq(addr);
}
93
/* Return the lmac context for @lmac_id, or NULL when the handle is
 * missing or the id is out of range.
 */
static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (cgx && lmac_id < MAX_LMAC_PER_CGX)
		return cgx->lmac_idmap[lmac_id];

	return NULL;
}
101
cgx_get_cgxcnt_max(void)102 int cgx_get_cgxcnt_max(void)
103 {
104 struct cgx *cgx_dev;
105 int idmax = -ENODEV;
106
107 list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
108 if (cgx_dev->cgx_id > idmax)
109 idmax = cgx_dev->cgx_id;
110
111 if (idmax < 0)
112 return 0;
113
114 return idmax + 1;
115 }
116
cgx_get_lmac_cnt(void * cgxd)117 int cgx_get_lmac_cnt(void *cgxd)
118 {
119 struct cgx *cgx = cgxd;
120
121 if (!cgx)
122 return -ENODEV;
123
124 return cgx->lmac_count;
125 }
126
cgx_get_pdata(int cgx_id)127 void *cgx_get_pdata(int cgx_id)
128 {
129 struct cgx *cgx_dev;
130
131 list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
132 if (cgx_dev->cgx_id == cgx_id)
133 return cgx_dev;
134 }
135 return NULL;
136 }
137
cgx_get_cgxid(void * cgxd)138 int cgx_get_cgxid(void *cgxd)
139 {
140 struct cgx *cgx = cgxd;
141
142 if (!cgx)
143 return -EINVAL;
144
145 return cgx->cgx_id;
146 }
147
148 /* Ensure the required lock for event queue(where asynchronous events are
149 * posted) is acquired before calling this API. Else an asynchronous event(with
150 * latest link status) can reach the destination before this function returns
151 * and could make the link status appear wrong.
152 */
cgx_get_link_info(void * cgxd,int lmac_id,struct cgx_link_user_info * linfo)153 int cgx_get_link_info(void *cgxd, int lmac_id,
154 struct cgx_link_user_info *linfo)
155 {
156 struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
157
158 if (!lmac)
159 return -ENODEV;
160
161 *linfo = lmac->link_info;
162 return 0;
163 }
164
mac2u64(u8 * mac_addr)165 static u64 mac2u64 (u8 *mac_addr)
166 {
167 u64 mac = 0;
168 int index;
169
170 for (index = ETH_ALEN - 1; index >= 0; index--)
171 mac |= ((u64)*mac_addr++) << (8 * index);
172 return mac;
173 }
174
/* Install @mac_addr in the DMAC CAM entry owned by @lmac_id and turn
 * on CAM based unicast filtering for that LMAC. Always returns 0.
 */
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	/* Pack the 6-byte MAC into the CAM data field */
	cfg = mac2u64(mac_addr);

	/* CAM entries are addressed from LMAC0's register window; the
	 * value at bit 49 presumably selects the LMAC the entry steers
	 * traffic to (matches the per-LMAC indexing used elsewhere).
	 */
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	/* Enable CAM based filtering on this LMAC */
	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}
194
cgx_lmac_addr_get(u8 cgx_id,u8 lmac_id)195 u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
196 {
197 struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
198 u64 cfg;
199
200 cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
201 return cfg & CGX_RX_DMAC_ADR_MASK;
202 }
203
/* Map this LMAC's received traffic to the given parser pkind */
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;
	if (lmac_id >= cgx->lmac_count)
		return -ENODEV;

	/* Only the low 6 bits of the pkind are programmed */
	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, pkind & 0x3F);
	return 0;
}
214
cgx_get_lmac_type(struct cgx * cgx,int lmac_id)215 static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
216 {
217 u64 cfg;
218
219 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
220 return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
221 }
222
223 /* Configure CGX LMAC in internal loopback mode */
cgx_lmac_internal_loopback(void * cgxd,int lmac_id,bool enable)224 int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
225 {
226 struct cgx *cgx = cgxd;
227 u8 lmac_type;
228 u64 cfg;
229
230 if (!cgx || lmac_id >= cgx->lmac_count)
231 return -ENODEV;
232
233 lmac_type = cgx_get_lmac_type(cgx, lmac_id);
234 if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
235 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
236 if (enable)
237 cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
238 else
239 cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
240 cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
241 } else {
242 cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
243 if (enable)
244 cfg |= CGXX_SPUX_CONTROL1_LBK;
245 else
246 cfg &= ~CGXX_SPUX_CONTROL1_LBK;
247 cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
248 }
249 return 0;
250 }
251
/* Enable or disable promiscuous reception on an LMAC.
 *
 * Enabling clears CAM-accept/mcast filtering and disables this
 * LMAC's DMAC CAM entry so all frames are accepted; disabling
 * restores CAM based filtering and re-enables the CAM entry.
 */
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	u64 cfg = 0;

	if (!cgx)
		return;

	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
		cfg |= CGX_DMAC_BCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		/* Disable this LMAC's DMAC CAM entry; CAM registers are
		 * indexed from LMAC0's window (hence lmac param of 0)
		 */
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		/* Re-enable the CAM entry for unicast filtering */
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	}
}
284
285 /* Enable or disable forwarding received pause frames to Tx block */
cgx_lmac_enadis_rx_pause_fwding(void * cgxd,int lmac_id,bool enable)286 void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
287 {
288 struct cgx *cgx = cgxd;
289 u64 cfg;
290
291 if (!cgx)
292 return;
293
294 if (enable) {
295 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
296 cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
297 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
298
299 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
300 cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
301 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
302 } else {
303 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
304 cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
305 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
306
307 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
308 cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
309 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
310 }
311 }
312
/* Read one RX statistics counter; registers are laid out 8 bytes apart */
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;
	u64 offset = CGXX_CMRX_RX_STAT0 + (idx * 8);

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	*rx_stat = cgx_read(cgx, lmac_id, offset);
	return 0;
}
322
/* Read one TX statistics counter; registers are laid out 8 bytes apart */
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;
	u64 offset = CGXX_CMRX_TX_STAT0 + (idx * 8);

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	*tx_stat = cgx_read(cgx, lmac_id, offset);
	return 0;
}
332
/* Enable/disable the whole datapath (CMR plus RX and TX) for an LMAC */
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	const u64 mask = CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	cfg = enable ? (cfg | mask) : (cfg & ~mask);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}
349
/* Set the TX enable bit for an LMAC and return its PREVIOUS state
 * (0 or 1) so callers can restore it later. Skips the register write
 * when the bit already matches the request.
 */
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 prev, cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	prev = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	cfg = enable ? (prev | DATA_PKT_TX_EN) : (prev & ~DATA_PKT_TX_EN);

	if (cfg != prev)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);

	return !!(prev & DATA_PKT_TX_EN);
}
369
/* Report the current 802.3x pause configuration (SMU path) as
 * booleans through @tx_pause / @rx_pause.
 */
int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
			   u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 rx_cfg, tx_cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	rx_cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	tx_cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);

	*rx_pause = !!(rx_cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
	*tx_pause = !!(tx_cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}
386
/* Program 802.3x pause in both directions for an LMAC.
 * @tx_pause and @rx_pause are treated as booleans.
 */
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
			   u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	/* RX pause: honour received pause frames (backpressure TX) */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	/* TX pause: convert local backpressure into pause transmission */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	/* With TX pause off, override this LMAC's backpressure inputs
	 * so the datapath is not throttled (register is CGX-wide, hence
	 * lmac param of 0 and per-LMAC bit macros).
	 */
	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}
416
/* Initialise (or fully disable) pause-frame handling on an LMAC.
 * Called with enable=true from cgx_lmac_init() and enable=false from
 * cgx_lmac_exit(). Touches both the SMU and GMP paths.
 */
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval; the low 16 bits of the
		 * interval register are replaced, the rest preserved.
		 */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* ALL pause frames received are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}
470
/* Toggle inbound PTP timestamping; the PTP_MODE bit exists in both
 * frame-control registers (GMP and SMU), update the two together.
 */
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
	if (enable)
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
	else
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	if (enable)
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
	else
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
}
499
/* CGX Firmware interface low level support */

/* Send one synchronous command to firmware for @lmac and wait for the
 * response, copied to *@resp by cgx_fwi_event_handler(). Serialised
 * per-LMAC by cmd_lock; sleeps, so must be called from process
 * context. Returns 0, or -EBUSY (command register owned by FW),
 * -EIO (timeout) or the mutex_lock_interruptible() error.
 */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* we have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}
548
/* Resolve @lmac_id, issue the command and translate a firmware
 * failure status in the response into -EIO.
 */
static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
				      struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
	int err;

	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);
	if (err)
		return err;

	/* Firmware flags command failure in the response status field */
	return (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) ? -EIO : 0;
}
571
cgx_link_usertable_init(void)572 static inline void cgx_link_usertable_init(void)
573 {
574 cgx_speed_mbps[CGX_LINK_NONE] = 0;
575 cgx_speed_mbps[CGX_LINK_10M] = 10;
576 cgx_speed_mbps[CGX_LINK_100M] = 100;
577 cgx_speed_mbps[CGX_LINK_1G] = 1000;
578 cgx_speed_mbps[CGX_LINK_2HG] = 2500;
579 cgx_speed_mbps[CGX_LINK_5G] = 5000;
580 cgx_speed_mbps[CGX_LINK_10G] = 10000;
581 cgx_speed_mbps[CGX_LINK_20G] = 20000;
582 cgx_speed_mbps[CGX_LINK_25G] = 25000;
583 cgx_speed_mbps[CGX_LINK_40G] = 40000;
584 cgx_speed_mbps[CGX_LINK_50G] = 50000;
585 cgx_speed_mbps[CGX_LINK_100G] = 100000;
586
587 cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
588 cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
589 cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
590 cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
591 cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
592 cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
593 cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
594 cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
595 cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
596 cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
597 }
598
/* Decode a firmware link-status word into the user-visible
 * cgx_link_user_info representation.
 */
static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	/* strncpy() does not guarantee NUL termination and @linfo may
	 * point into uninitialized caller stack (see the on-stack
	 * cgx_link_event in cgx_link_change_handler()); strscpy()
	 * always terminates the destination.
	 */
	strscpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN);
}
612
/* Hardware event handlers */

/* Handle a firmware link-change notification: refresh the cached
 * link state and either invoke the registered callback or, when no
 * callback is registered, just log the new state. Runs from the
 * FWI interrupt handler.
 */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	/* Ensure callback doesn't get unregistered until we finish it;
	 * pairs with the lock in cgx_lmac_evh_unregister()
	 */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}
656
cgx_cmdresp_is_linkevent(u64 event)657 static inline bool cgx_cmdresp_is_linkevent(u64 event)
658 {
659 u8 id;
660
661 id = FIELD_GET(EVTREG_ID, event);
662 if (id == CGX_CMD_LINK_BRING_UP ||
663 id == CGX_CMD_LINK_BRING_DOWN)
664 return true;
665 else
666 return false;
667 }
668
cgx_event_is_linkevent(u64 event)669 static inline bool cgx_event_is_linkevent(u64 event)
670 {
671 if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
672 return true;
673 else
674 return false;
675 }
676
/* Per-LMAC firmware interface IRQ handler: dispatches command
 * responses (waking the thread blocked in cgx_fwi_cmd_send()) and
 * asynchronous events, then acks the event and interrupt registers.
 */
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	struct lmac *lmac = data;
	struct cgx *cgx;
	u64 event;

	cgx = lmac->cgx;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	/* Nothing posted by firmware -> not ours */
	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts;
		 * pairs with the smp_rmb() in cgx_fwi_cmd_send()
		 */
		smp_wmb();

		/* There wont be separate events for link change initiated from
		 * software; Hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

	return IRQ_HANDLED;
}
724
725 /* APIs for PHY management using CGX firmware interface */
726
727 /* callback registration for hardware events like link change */
cgx_lmac_evh_register(struct cgx_event_cb * cb,void * cgxd,int lmac_id)728 int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
729 {
730 struct cgx *cgx = cgxd;
731 struct lmac *lmac;
732
733 lmac = lmac_pdata(lmac_id, cgx);
734 if (!lmac)
735 return -ENODEV;
736
737 lmac->event_cb = *cb;
738
739 return 0;
740 }
741
cgx_lmac_evh_unregister(void * cgxd,int lmac_id)742 int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
743 {
744 struct lmac *lmac;
745 unsigned long flags;
746 struct cgx *cgx = cgxd;
747
748 lmac = lmac_pdata(lmac_id, cgx);
749 if (!lmac)
750 return -ENODEV;
751
752 spin_lock_irqsave(&lmac->event_cb_lock, flags);
753 lmac->event_cb.notify_link_chg = NULL;
754 lmac->event_cb.data = NULL;
755 spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
756
757 return 0;
758 }
759
/* Query firmware for the shared-data base address. Any CGX instance
 * can answer; the first one on the list is used.
 */
int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
	if (err)
		return err;

	*base = FIELD_GET(RESP_FWD_BASE, resp);
	return 0;
}
777
/* Ask firmware to bring the LMAC's link up or down */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
	int cmd_id = enable ? CGX_CMD_LINK_BRING_UP : CGX_CMD_LINK_BRING_DOWN;
	u64 req = 0;
	u64 resp;

	req = FIELD_SET(CMDREG_ID, cmd_id, req);
	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
790
/* Fetch the firmware interface version word into *@resp */
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);

	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}
798
cgx_lmac_verify_fwi_version(struct cgx * cgx)799 static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
800 {
801 struct device *dev = &cgx->pdev->dev;
802 int major_ver, minor_ver;
803 u64 resp;
804 int err;
805
806 if (!cgx->lmac_count)
807 return 0;
808
809 err = cgx_fwi_read_version(&resp, cgx);
810 if (err)
811 return err;
812
813 major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
814 minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
815 dev_dbg(dev, "Firmware command interface version = %d.%d\n",
816 major_ver, minor_ver);
817 if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
818 minor_ver != CGX_FIRMWARE_MINOR_VER)
819 return -EIO;
820 else
821 return 0;
822 }
823
cgx_lmac_linkup_work(struct work_struct * work)824 static void cgx_lmac_linkup_work(struct work_struct *work)
825 {
826 struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
827 struct device *dev = &cgx->pdev->dev;
828 int i, err;
829
830 /* Do Link up for all the lmacs */
831 for (i = 0; i < cgx->lmac_count; i++) {
832 err = cgx_fwi_link_change(cgx, i, true);
833 if (err)
834 dev_info(dev, "cgx port %d:%d Link up command failed\n",
835 cgx->cgx_id, i);
836 }
837 }
838
cgx_lmac_linkup_start(void * cgxd)839 int cgx_lmac_linkup_start(void *cgxd)
840 {
841 struct cgx *cgx = cgxd;
842
843 if (!cgx)
844 return -ENODEV;
845
846 queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
847
848 return 0;
849 }
850
cgx_lmac_init(struct cgx * cgx)851 static int cgx_lmac_init(struct cgx *cgx)
852 {
853 struct lmac *lmac;
854 int i, err;
855
856 cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
857 if (cgx->lmac_count > MAX_LMAC_PER_CGX)
858 cgx->lmac_count = MAX_LMAC_PER_CGX;
859
860 for (i = 0; i < cgx->lmac_count; i++) {
861 lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
862 if (!lmac)
863 return -ENOMEM;
864 lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
865 if (!lmac->name) {
866 err = -ENOMEM;
867 goto err_lmac_free;
868 }
869 sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
870 lmac->lmac_id = i;
871 lmac->cgx = cgx;
872 init_waitqueue_head(&lmac->wq_cmd_cmplt);
873 mutex_init(&lmac->cmd_lock);
874 spin_lock_init(&lmac->event_cb_lock);
875 err = request_irq(pci_irq_vector(cgx->pdev,
876 CGX_LMAC_FWI + i * 9),
877 cgx_fwi_event_handler, 0, lmac->name, lmac);
878 if (err)
879 goto err_irq;
880
881 /* Enable interrupt */
882 cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
883 FW_CGX_INT);
884
885 /* Add reference */
886 cgx->lmac_idmap[i] = lmac;
887 cgx_lmac_pause_frm_config(cgx, i, true);
888 }
889
890 return cgx_lmac_verify_fwi_version(cgx);
891
892 err_irq:
893 kfree(lmac->name);
894 err_lmac_free:
895 kfree(lmac);
896 return err;
897 }
898
cgx_lmac_exit(struct cgx * cgx)899 static int cgx_lmac_exit(struct cgx *cgx)
900 {
901 struct lmac *lmac;
902 int i;
903
904 if (cgx->cgx_cmd_workq) {
905 flush_workqueue(cgx->cgx_cmd_workq);
906 destroy_workqueue(cgx->cgx_cmd_workq);
907 cgx->cgx_cmd_workq = NULL;
908 }
909
910 /* Free all lmac related resources */
911 for (i = 0; i < cgx->lmac_count; i++) {
912 cgx_lmac_pause_frm_config(cgx, i, false);
913 lmac = cgx->lmac_idmap[i];
914 if (!lmac)
915 continue;
916 free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
917 kfree(lmac->name);
918 kfree(lmac);
919 }
920
921 return 0;
922 }
923
/* PCI probe: enable the device, map BAR0 CSRs, allocate MSI-X
 * vectors, create the link-up workqueue, register on the global list
 * and initialise all LMACs. Error unwinding is strictly in reverse
 * acquisition order.
 *
 * NOTE(review): pcim_iomap() is device-managed while
 * pci_enable_device()/pci_request_regions() are unmanaged here —
 * consider the pcim_* variants throughout; verify against the PCI
 * managed-API documentation.
 */
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Exactly CGX_NVEC vectors are required */
	nvec = CGX_NVEC;
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	/* The CGX block id is encoded in the BAR address */
	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_link_usertable_init();

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1000
/* PCI remove: undo cgx_probe() in reverse order. LMAC teardown must
 * come first so IRQs and the workqueue are quiesced before PCI
 * resources are released.
 */
static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1012
/* Non-static and no module_pci_driver() here: registration is
 * presumably done by another part of the driver — verify against the
 * code that references cgx_driver.
 */
struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};
1019