// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

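/* The M() macro below is expanded once per entry in MBOX_UP_CGX_MESSAGES
 * and generates an otx2_mbox_alloc_msg_<fn_name>() helper that allocates
 * a request/response pair on the AF->PF "up" mailbox and stamps the
 * request header. For example, cgx_notify_pfs() below uses the generated
 * otx2_mbox_alloc_msg_cgx_link_event() to build link change notifications.
 */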
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

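/* Pack a CGX and LMAC id pair into one byte: the upper nibble holds the
 * cgx id, the lower nibble the lmac id. This is the format stored in
 * pf2cgxlmac_map and decoded by rvu_get_cgx_lmac_id() throughout this file.
 */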
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

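/* Returns the per-CGX private data registered at init time, or NULL if
 * cgx_id is out of range or that CGX device was not probed.
 */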
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

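/* Build the PF <-> CGX LMAC maps: pf2cgxlmac_map gives the (cgx, lmac)
 * pair serving a PF, cgxlmac2pf_map is the reverse map (a bitmap of PFs
 * per LMAC), and each mapped PF also gets a pkind with its channel
 * programmed into pfchan_map.
 */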
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int cgx, lmac_cnt, lmac;
	int pf = PF_CGXMAP_BASE;
	int size, free_pkind;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu->cgx_mapped_pfs++;
		}
	}
	return 0;
}

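/* Read the current link state of a CGX LMAC and queue it on the event
 * list so the worker forwards it to the mapped PF.
 */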
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		/* Entry was never queued; free it to avoid leaking it */
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

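/* Notify every PF mapped to the LMAC that generated this event. PFs that
 * have not enabled notifications only get the link state logged on the AF.
 */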
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

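/* Register cgx_lmac_postevent() as the link change callback for every
 * LMAC of every probed CGX device, and set up the event queue and the
 * worker that drains it.
 */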
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed\n");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only.
 * VFs of the mapped PF and other PFs are not allowed. This function
 * checks whether a PFFUNC is permitted to do the config or not.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

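/* Enable/disable forwarding of CGX pause frames as backpressure to the
 * NIX block for the LMAC mapped to this PF.
 */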
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, enable);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

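/* Mbox handlers: thin wrappers that start/stop Rx/Tx on the CGX LMAC
 * mapped to the requesting PF.
 */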
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);

	/* Rx stats */
	while (stat < CGX_RX_STATS_COUNT) {
		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		rsp->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < CGX_TX_STATS_COUNT) {
		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		rsp->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48-bit mac address, most significant byte first,
	 * to rsp->mac_addr.
	 */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	cgx_lmac_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

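/* Enable/disable link change notifications for a PF. On enable, the
 * current link state is pushed immediately so the PF starts with an
 * up-to-date view of the link.
 */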
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

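/* Enable/disable internal loopback on the CGX LMAC mapped to this PF */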
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (req->set)
		cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				       req->tx_pause, req->rx_pause);
	else
		cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				       &rsp->tx_pause, &rsp->rx_pause);
	return 0;
}

/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
 * from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LF of a PF and all of its VF belongs to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if ((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) !=
		    (pcifunc & ~RVU_PFVF_FUNC_MASK))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

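/* Reference count NIXLF starts/stops so the underlying CGX LMAC is
 * enabled only while at least one NIXLF of the PF or its VFs is active:
 * Rx/Tx is turned on with the first user and off with the last.
 */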
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}