1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/ethtool.h>
5 #include <linux/printk.h>
6 #include <linux/dynamic_debug.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/cpumask.h>
14 #include <linux/crash_dump.h>
15 #include <linux/vmalloc.h>
16
17 #include "ionic.h"
18 #include "ionic_bus.h"
19 #include "ionic_dev.h"
20 #include "ionic_lif.h"
21 #include "ionic_txrx.h"
22 #include "ionic_ethtool.h"
23 #include "ionic_debugfs.h"
24
25 /* queuetype support level */
/* queuetype support level
 *
 * Version number negotiated with the FW per queue type; see
 * ionic_lif_queue_identify() for how these are compared against
 * what the device supports.
 */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      */
};
34
35 static void ionic_link_status_check(struct ionic_lif *lif);
36 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
37 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
38 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
39
40 static void ionic_txrx_deinit(struct ionic_lif *lif);
41 static int ionic_txrx_init(struct ionic_lif *lif);
42 static int ionic_start_queues(struct ionic_lif *lif);
43 static void ionic_stop_queues(struct ionic_lif *lif);
44 static void ionic_lif_queue_identify(struct ionic_lif *lif);
45
ionic_dim_work(struct work_struct * work)46 static void ionic_dim_work(struct work_struct *work)
47 {
48 struct dim *dim = container_of(work, struct dim, work);
49 struct ionic_intr_info *intr;
50 struct dim_cq_moder cur_moder;
51 struct ionic_qcq *qcq;
52 struct ionic_lif *lif;
53 u32 new_coal;
54
55 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
56 qcq = container_of(dim, struct ionic_qcq, dim);
57 lif = qcq->q.lif;
58 new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
59 new_coal = new_coal ? new_coal : 1;
60
61 intr = &qcq->intr;
62 if (intr->dim_coal_hw != new_coal) {
63 intr->dim_coal_hw = new_coal;
64
65 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
66 intr->index, intr->dim_coal_hw);
67 }
68
69 dim->state = DIM_START_MEASURE;
70 }
71
/* Worker for the lif's deferred-work list: drain the list one entry
 * at a time and dispatch each entry by type.  Entries are queued by
 * ionic_lif_deferred_enqueue() and freed here after handling.
 */
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		/* pop the next entry under the lock, handle it unlocked */
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			/* w->fw_status tells us whether FW came up or went down */
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;	/* reset so an empty list ends the loop */
	} while (true);
}
117
ionic_lif_deferred_enqueue(struct ionic_deferred * def,struct ionic_deferred_work * work)118 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
119 struct ionic_deferred_work *work)
120 {
121 spin_lock_bh(&def->lock);
122 list_add_tail(&work->list, &def->list);
123 spin_unlock_bh(&def->lock);
124 schedule_work(&def->work);
125 }
126
/* Sync the netdev carrier and queue state with the port link status
 * reported in lif->info, then clear the check-requested flag.  Runs
 * from the deferred worker, or directly from
 * ionic_link_status_check_request() when sleeping is allowed.
 */
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	/* only do the work if a check was actually requested */
	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				/* a hard failure leaves the lif unusable */
				netdev_err(lif->netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			/* refresh port info before reporting the link speed */
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
181
/* Request a link status check, either run inline (can_sleep) or
 * pushed off to the deferred worker (atomic context).
 */
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (can_sleep) {
		ionic_link_status_check(lif);
		return;
	}

	/* atomic context: hand the check off to the deferred worker */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	work->type = IONIC_DW_TYPE_LINK_STATUS;
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}
203
ionic_napi_deadline(struct timer_list * timer)204 static void ionic_napi_deadline(struct timer_list *timer)
205 {
206 struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
207
208 napi_schedule(&qcq->napi);
209 }
210
ionic_isr(int irq,void * data)211 static irqreturn_t ionic_isr(int irq, void *data)
212 {
213 struct napi_struct *napi = data;
214
215 napi_schedule_irqoff(napi);
216
217 return IRQ_HANDLED;
218 }
219
ionic_request_irq(struct ionic_lif * lif,struct ionic_qcq * qcq)220 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
221 {
222 struct ionic_intr_info *intr = &qcq->intr;
223 struct device *dev = lif->ionic->dev;
224 struct ionic_queue *q = &qcq->q;
225 const char *name;
226
227 if (lif->registered)
228 name = lif->netdev->name;
229 else
230 name = dev_name(dev);
231
232 snprintf(intr->name, sizeof(intr->name),
233 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
234
235 return devm_request_irq(dev, intr->vector, ionic_isr,
236 0, intr->name, &qcq->napi);
237 }
238
/* Claim the first free interrupt slot from the device's bitmap and
 * initialize the intr info for it.  Returns -ENOSPC if all slots
 * are in use.
 */
static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	/* find_first_zero_bit() returns nintrs when no bit is free */
	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}
256
ionic_intr_free(struct ionic * ionic,int index)257 static void ionic_intr_free(struct ionic *ionic, int index)
258 {
259 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
260 clear_bit(index, ionic->intrs);
261 }
262
/* Enable a queue: ask the FW to start servicing it, then enable napi
 * and unmask the interrupt.  Returns the adminq command status.
 */
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	/* drop any stale interrupt credits before enabling */
	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->napi.poll)
		napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		/* set affinity and unmask only after the FW has
		 * acknowledged the enable and napi is running
		 */
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}
307
/* Disable a queue: quiesce dim, the interrupt, and napi, then tell
 * the FW to stop servicing the queue.  If fw_err shows the FW is
 * already unreachable, skip the adminq command entirely.
 */
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		/* stop dim, mask and drain the irq, then stop napi
		 * and its backup deadline timer - order matters so no
		 * new napi runs can be scheduled while we tear down
		 */
		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
		del_timer_sync(&qcq->napi_deadline);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	/* fill in the rest of the q_control command and send it */
	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}
353
ionic_lif_qcq_deinit(struct ionic_lif * lif,struct ionic_qcq * qcq)354 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
355 {
356 struct ionic_dev *idev = &lif->ionic->idev;
357
358 if (!qcq)
359 return;
360
361 if (!(qcq->flags & IONIC_QCQ_F_INITED))
362 return;
363
364 if (qcq->flags & IONIC_QCQ_F_INTR) {
365 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
366 IONIC_INTR_MASK_SET);
367 netif_napi_del(&qcq->napi);
368 }
369
370 qcq->flags &= ~IONIC_QCQ_F_INITED;
371 }
372
/* Release the irq vector and interrupt slot held by this qcq, if
 * any.  Safe to call on a qcq with no interrupt assigned.
 */
static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	/* drop the affinity hint before freeing the vector, then
	 * return the intr index to the device's free pool
	 */
	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
384
ionic_qcq_free(struct ionic_lif * lif,struct ionic_qcq * qcq)385 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
386 {
387 struct device *dev = lif->ionic->dev;
388
389 if (!qcq)
390 return;
391
392 ionic_debugfs_del_qcq(qcq);
393
394 if (qcq->q_base) {
395 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
396 qcq->q_base = NULL;
397 qcq->q_base_pa = 0;
398 }
399
400 if (qcq->cq_base) {
401 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
402 qcq->cq_base = NULL;
403 qcq->cq_base_pa = 0;
404 }
405
406 if (qcq->sg_base) {
407 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
408 qcq->sg_base = NULL;
409 qcq->sg_base_pa = 0;
410 }
411
412 ionic_qcq_intr_free(lif, qcq);
413
414 if (qcq->cq.info) {
415 vfree(qcq->cq.info);
416 qcq->cq.info = NULL;
417 }
418 if (qcq->q.info) {
419 vfree(qcq->q.info);
420 qcq->q.info = NULL;
421 }
422 }
423
/* Free all of the lif's queue/cq pairs and the qcq/stats arrays.
 * The adminqcq pointer is cleared under adminq_lock first so a
 * racing adminq user can't pick up a pointer we're about to free.
 */
static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		/* detach the pointer under the lock, free it outside */
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	/* the rx/tx qcq entries themselves are freed elsewhere;
	 * here we free only the pointer and stats arrays
	 */
	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}
461
/* Make n_qcq share src_qcq's interrupt: copy over the vector and
 * intr index, and point it at the same napi-owning qcq.
 */
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
	n_qcq->napi_qcq = src_qcq->napi_qcq;
}
469
/* Allocate an interrupt slot and irq vector for the qcq and hook up
 * the isr, leaving the interrupt masked.  For qcqs without
 * IONIC_QCQ_F_INTR just mark the index unassigned.
 */
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	/* returns the irq vector number on success */
	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	/* keep the interrupt masked until the queue is enabled */
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}
516
ionic_qcq_alloc(struct ionic_lif * lif,unsigned int type,unsigned int index,const char * name,unsigned int flags,unsigned int num_descs,unsigned int desc_size,unsigned int cq_desc_size,unsigned int sg_desc_size,unsigned int pid,struct ionic_qcq ** qcq)517 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
518 unsigned int index,
519 const char *name, unsigned int flags,
520 unsigned int num_descs, unsigned int desc_size,
521 unsigned int cq_desc_size,
522 unsigned int sg_desc_size,
523 unsigned int pid, struct ionic_qcq **qcq)
524 {
525 struct ionic_dev *idev = &lif->ionic->idev;
526 struct device *dev = lif->ionic->dev;
527 void *q_base, *cq_base, *sg_base;
528 dma_addr_t cq_base_pa = 0;
529 dma_addr_t sg_base_pa = 0;
530 dma_addr_t q_base_pa = 0;
531 struct ionic_qcq *new;
532 int err;
533
534 *qcq = NULL;
535
536 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
537 if (!new) {
538 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
539 err = -ENOMEM;
540 goto err_out;
541 }
542
543 new->q.dev = dev;
544 new->flags = flags;
545
546 new->q.info = vzalloc(num_descs * sizeof(*new->q.info));
547 if (!new->q.info) {
548 netdev_err(lif->netdev, "Cannot allocate queue info\n");
549 err = -ENOMEM;
550 goto err_out_free_qcq;
551 }
552
553 new->q.type = type;
554 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
555
556 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
557 desc_size, sg_desc_size, pid);
558 if (err) {
559 netdev_err(lif->netdev, "Cannot initialize queue\n");
560 goto err_out_free_q_info;
561 }
562
563 err = ionic_alloc_qcq_interrupt(lif, new);
564 if (err)
565 goto err_out;
566
567 new->cq.info = vzalloc(num_descs * sizeof(*new->cq.info));
568 if (!new->cq.info) {
569 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
570 err = -ENOMEM;
571 goto err_out_free_irq;
572 }
573
574 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
575 if (err) {
576 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
577 goto err_out_free_cq_info;
578 }
579
580 if (flags & IONIC_QCQ_F_NOTIFYQ) {
581 int q_size, cq_size;
582
583 /* q & cq need to be contiguous in case of notifyq */
584 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
585 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
586
587 new->q_size = PAGE_SIZE + q_size + cq_size;
588 new->q_base = dma_alloc_coherent(dev, new->q_size,
589 &new->q_base_pa, GFP_KERNEL);
590 if (!new->q_base) {
591 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
592 err = -ENOMEM;
593 goto err_out_free_cq_info;
594 }
595 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
596 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
597 ionic_q_map(&new->q, q_base, q_base_pa);
598
599 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
600 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
601 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
602 ionic_cq_bind(&new->cq, &new->q);
603 } else {
604 new->q_size = PAGE_SIZE + (num_descs * desc_size);
605 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
606 GFP_KERNEL);
607 if (!new->q_base) {
608 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
609 err = -ENOMEM;
610 goto err_out_free_cq_info;
611 }
612 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
613 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
614 ionic_q_map(&new->q, q_base, q_base_pa);
615
616 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
617 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
618 GFP_KERNEL);
619 if (!new->cq_base) {
620 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
621 err = -ENOMEM;
622 goto err_out_free_q;
623 }
624 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
625 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
626 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
627 ionic_cq_bind(&new->cq, &new->q);
628 }
629
630 if (flags & IONIC_QCQ_F_SG) {
631 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
632 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
633 GFP_KERNEL);
634 if (!new->sg_base) {
635 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
636 err = -ENOMEM;
637 goto err_out_free_cq;
638 }
639 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
640 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
641 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
642 }
643
644 INIT_WORK(&new->dim.work, ionic_dim_work);
645 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
646
647 *qcq = new;
648
649 return 0;
650
651 err_out_free_cq:
652 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
653 err_out_free_q:
654 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
655 err_out_free_cq_info:
656 vfree(new->cq.info);
657 err_out_free_irq:
658 if (flags & IONIC_QCQ_F_INTR) {
659 devm_free_irq(dev, new->intr.vector, &new->napi);
660 ionic_intr_free(lif->ionic, new->intr.index);
661 }
662 err_out_free_q_info:
663 vfree(new->q.info);
664 err_out_free_qcq:
665 devm_kfree(dev, new);
666 err_out:
667 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
668 return err;
669 }
670
/* Allocate the lif's fixed queues (adminq, optional notifyq) and
 * the pointer/stats arrays for the tx and rx queues.  Any failure
 * unwinds everything via ionic_qcqs_free().
 */
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	/* all remaining failures are allocation failures */
	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	/* the +1 entry is for a possible hwstamp queue */
	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}
727
/* Reset the q/cq indexes and zero the descriptor rings so the queue
 * can be (re)initialized from a clean state.
 *
 * NOTE(review): sg_base may be NULL when IONIC_QCQ_F_SG was not set;
 * this assumes sg_size is then 0 so the memset is a zero-length
 * no-op — confirm against ionic_qcq_alloc().
 */
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}
738
/* Initialize a tx queue in the FW via a Q_INIT adminq command, then
 * record the hw identifiers the FW returned and, in split-interrupt
 * mode, set up a dedicated tx napi context.
 */
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	unsigned int intr_index;
	int err;

	intr_index = qcq->intr.index;

	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	/* clean descriptor state before handing the rings to the FW */
	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* save the hw identifiers the FW assigned to this queue */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	/* tx gets its own napi only when tx/rx interrupts are split */
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
		qcq->napi_qcq = qcq;
		timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
803
/* Initialize an rx queue in the FW via a Q_INIT adminq command,
 * record the hw identifiers returned, and set up napi — rx-only
 * napi in split-interrupt mode, combined tx/rx napi otherwise.
 */
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	/* clean descriptor state before handing the rings to the FW */
	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* save the hw identifiers the FW assigned to this queue */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);

	qcq->napi_qcq = qcq;
	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
866
/* Create the dedicated hw-timestamp tx queue (at index
 * ntxqs_per_lif, past the regular tx queues), sharing the adminq
 * interrupt.  If the netdev is already running, also init and
 * (if the lif is up) enable it.  Idempotent: returns 0 if the
 * queue already exists.
 */
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	/* double-wide completions to carry the timestamp */
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	/* pick the SG descriptor format matching the FW's tx qtype */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	/* ride on the adminq interrupt rather than using its own */
	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}
930
/* Create the dedicated hw-timestamp rx queue (at index
 * nrxqs_per_lif, past the regular rx queues), sharing the adminq
 * interrupt.  If the netdev is already running, also init, fill,
 * and (if the lif is up) enable it.  Idempotent: returns 0 if the
 * queue already exists.
 */
int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	/* double-wide completions to carry the timestamp */
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	/* ride on the adminq interrupt rather than using its own */
	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			/* post rx buffers before enabling the queue */
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}
990
/* Turn hw timestamping on or off for all rx queues: when running,
 * reconfigure the queues with the new feature bits; otherwise just
 * record the desired setting for the next time they come up.
 */
int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	qparam.rxq_features = rx_all ?
		IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP : 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}
1010
/* Tell the FW which tx timestamping mode to use for this lif via a
 * LIF_SETATTR adminq command.  Returns the adminq status.
 */
int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}
1025
/* Remove the rx-steering filter used for hw timestamping, if one
 * exists: drop it from the local filter table under the lock, then
 * ask the FW to delete it.
 */
static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		/* no steering filter installed, nothing to do */
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	/* remember the id before freeing the local entry */
	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	/* NOTE(review): tolerating -EEXIST on a delete looks odd —
	 * confirm the intended "already gone" error code
	 */
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}
1060
/* Install an Rx steering filter that directs packets of the given
 * packet class to the dedicated hwstamp Rx queue.
 */
static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u32 qid;
	int err;

	/* there's nothing to steer to without a hwstamp queue */
	if (!lif->hwstamp_rxq)
		return -EINVAL;

	ctx.cmd.rx_filter_add.qtype = lif->hwstamp_rxq->q.type;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	/* remember the filter locally so it can be found and deleted later */
	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
1096
/* Replace the PTP Rx steering filter: always delete the old one,
 * then add a new filter only when a non-zero packet class is given.
 */
int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (pkt_class)
		return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);

	return 0;
}
1106
ionic_notifyq_service(struct ionic_cq * cq,struct ionic_cq_info * cq_info)1107 static bool ionic_notifyq_service(struct ionic_cq *cq,
1108 struct ionic_cq_info *cq_info)
1109 {
1110 union ionic_notifyq_comp *comp = cq_info->cq_desc;
1111 struct ionic_deferred_work *work;
1112 struct net_device *netdev;
1113 struct ionic_queue *q;
1114 struct ionic_lif *lif;
1115 u64 eid;
1116
1117 q = cq->bound_q;
1118 lif = q->info[0].cb_arg;
1119 netdev = lif->netdev;
1120 eid = le64_to_cpu(comp->event.eid);
1121
1122 /* Have we run out of new completions to process? */
1123 if ((s64)(eid - lif->last_eid) <= 0)
1124 return false;
1125
1126 lif->last_eid = eid;
1127
1128 dev_dbg(lif->ionic->dev, "notifyq event:\n");
1129 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
1130 comp, sizeof(*comp), true);
1131
1132 switch (le16_to_cpu(comp->event.ecode)) {
1133 case IONIC_EVENT_LINK_CHANGE:
1134 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1135 break;
1136 case IONIC_EVENT_RESET:
1137 if (lif->ionic->idev.fw_status_ready &&
1138 !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
1139 !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
1140 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1141 if (!work) {
1142 netdev_err(lif->netdev, "Reset event dropped\n");
1143 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
1144 } else {
1145 work->type = IONIC_DW_TYPE_LIF_RESET;
1146 ionic_lif_deferred_enqueue(&lif->deferred, work);
1147 }
1148 }
1149 break;
1150 default:
1151 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
1152 comp->event.ecode, eid);
1153 break;
1154 }
1155
1156 return true;
1157 }
1158
ionic_adminq_service(struct ionic_cq * cq,struct ionic_cq_info * cq_info)1159 static bool ionic_adminq_service(struct ionic_cq *cq,
1160 struct ionic_cq_info *cq_info)
1161 {
1162 struct ionic_admin_comp *comp = cq_info->cq_desc;
1163
1164 if (!color_match(comp->color, cq->done_color))
1165 return false;
1166
1167 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
1168
1169 return true;
1170 }
1171
ionic_adminq_napi(struct napi_struct * napi,int budget)1172 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
1173 {
1174 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
1175 struct ionic_lif *lif = napi_to_cq(napi)->lif;
1176 struct ionic_dev *idev = &lif->ionic->idev;
1177 unsigned long irqflags;
1178 unsigned int flags = 0;
1179 bool resched = false;
1180 int rx_work = 0;
1181 int tx_work = 0;
1182 int n_work = 0;
1183 int a_work = 0;
1184 int work_done;
1185 int credits;
1186
1187 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
1188 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
1189 ionic_notifyq_service, NULL, NULL);
1190
1191 spin_lock_irqsave(&lif->adminq_lock, irqflags);
1192 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
1193 a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
1194 ionic_adminq_service, NULL, NULL);
1195 spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
1196
1197 if (lif->hwstamp_rxq)
1198 rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
1199 ionic_rx_service, NULL, NULL);
1200
1201 if (lif->hwstamp_txq)
1202 tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
1203 ionic_tx_service, NULL, NULL);
1204
1205 work_done = max(max(n_work, a_work), max(rx_work, tx_work));
1206 if (work_done < budget && napi_complete_done(napi, work_done)) {
1207 flags |= IONIC_INTR_CRED_UNMASK;
1208 intr->rearm_count++;
1209 }
1210
1211 if (work_done || flags) {
1212 flags |= IONIC_INTR_CRED_RESET_COALESCE;
1213 credits = n_work + a_work + rx_work + tx_work;
1214 ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
1215 }
1216
1217 if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
1218 resched = true;
1219 if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
1220 resched = true;
1221 if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
1222 resched = true;
1223 if (resched)
1224 mod_timer(&lif->adminqcq->napi_deadline,
1225 jiffies + IONIC_NAPI_DEADLINE);
1226
1227 return work_done;
1228 }
1229
ionic_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * ns)1230 void ionic_get_stats64(struct net_device *netdev,
1231 struct rtnl_link_stats64 *ns)
1232 {
1233 struct ionic_lif *lif = netdev_priv(netdev);
1234 struct ionic_lif_stats *ls;
1235
1236 memset(ns, 0, sizeof(*ns));
1237 ls = &lif->info->stats;
1238
1239 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
1240 le64_to_cpu(ls->rx_mcast_packets) +
1241 le64_to_cpu(ls->rx_bcast_packets);
1242
1243 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
1244 le64_to_cpu(ls->tx_mcast_packets) +
1245 le64_to_cpu(ls->tx_bcast_packets);
1246
1247 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
1248 le64_to_cpu(ls->rx_mcast_bytes) +
1249 le64_to_cpu(ls->rx_bcast_bytes);
1250
1251 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
1252 le64_to_cpu(ls->tx_mcast_bytes) +
1253 le64_to_cpu(ls->tx_bcast_bytes);
1254
1255 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
1256 le64_to_cpu(ls->rx_mcast_drop_packets) +
1257 le64_to_cpu(ls->rx_bcast_drop_packets);
1258
1259 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
1260 le64_to_cpu(ls->tx_mcast_drop_packets) +
1261 le64_to_cpu(ls->tx_bcast_drop_packets);
1262
1263 ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
1264
1265 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
1266
1267 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
1268 le64_to_cpu(ls->rx_queue_disabled) +
1269 le64_to_cpu(ls->rx_desc_fetch_error) +
1270 le64_to_cpu(ls->rx_desc_data_error);
1271
1272 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
1273 le64_to_cpu(ls->tx_queue_disabled) +
1274 le64_to_cpu(ls->tx_desc_fetch_error) +
1275 le64_to_cpu(ls->tx_desc_data_error);
1276
1277 ns->rx_errors = ns->rx_over_errors +
1278 ns->rx_missed_errors;
1279
1280 ns->tx_errors = ns->tx_aborted_errors;
1281 }
1282
ionic_addr_add(struct net_device * netdev,const u8 * addr)1283 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1284 {
1285 return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
1286 }
1287
ionic_addr_del(struct net_device * netdev,const u8 * addr)1288 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1289 {
1290 /* Don't delete our own address from the uc list */
1291 if (ether_addr_equal(addr, netdev->dev_addr))
1292 return 0;
1293
1294 return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
1295 }
1296
ionic_lif_rx_mode(struct ionic_lif * lif)1297 void ionic_lif_rx_mode(struct ionic_lif *lif)
1298 {
1299 struct net_device *netdev = lif->netdev;
1300 unsigned int nfilters;
1301 unsigned int nd_flags;
1302 char buf[128];
1303 u16 rx_mode;
1304 int i;
1305 #define REMAIN(__x) (sizeof(buf) - (__x))
1306
1307 mutex_lock(&lif->config_lock);
1308
1309 /* grab the flags once for local use */
1310 nd_flags = netdev->flags;
1311
1312 rx_mode = IONIC_RX_MODE_F_UNICAST;
1313 rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1314 rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1315 rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1316 rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1317
1318 /* sync the filters */
1319 ionic_rx_filter_sync(lif);
1320
1321 /* check for overflow state
1322 * if so, we track that we overflowed and enable NIC PROMISC
1323 * else if the overflow is set and not needed
1324 * we remove our overflow flag and check the netdev flags
1325 * to see if we can disable NIC PROMISC
1326 */
1327 nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
1328
1329 if (((lif->nucast + lif->nmcast) >= nfilters) ||
1330 (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
1331 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1332 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1333 } else {
1334 if (!(nd_flags & IFF_PROMISC))
1335 rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1336 if (!(nd_flags & IFF_ALLMULTI))
1337 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1338 }
1339
1340 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1341 lif->rx_mode, rx_mode);
1342 if (rx_mode & IONIC_RX_MODE_F_UNICAST)
1343 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
1344 if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
1345 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
1346 if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
1347 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
1348 if (rx_mode & IONIC_RX_MODE_F_PROMISC)
1349 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
1350 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
1351 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1352 if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
1353 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
1354 netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
1355
1356 if (lif->rx_mode != rx_mode) {
1357 struct ionic_admin_ctx ctx = {
1358 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1359 .cmd.rx_mode_set = {
1360 .opcode = IONIC_CMD_RX_MODE_SET,
1361 .lif_index = cpu_to_le16(lif->index),
1362 },
1363 };
1364 int err;
1365
1366 ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
1367 err = ionic_adminq_post_wait(lif, &ctx);
1368 if (err)
1369 netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
1370 rx_mode, err);
1371 else
1372 lif->rx_mode = rx_mode;
1373 }
1374
1375 mutex_unlock(&lif->config_lock);
1376 }
1377
ionic_ndo_set_rx_mode(struct net_device * netdev)1378 static void ionic_ndo_set_rx_mode(struct net_device *netdev)
1379 {
1380 struct ionic_lif *lif = netdev_priv(netdev);
1381 struct ionic_deferred_work *work;
1382
1383 /* Sync the kernel filter list with the driver filter list */
1384 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1385 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1386
1387 /* Shove off the rest of the rxmode work to the work task
1388 * which will include syncing the filters to the firmware.
1389 */
1390 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1391 if (!work) {
1392 netdev_err(lif->netdev, "rxmode change dropped\n");
1393 return;
1394 }
1395 work->type = IONIC_DW_TYPE_RX_MODE;
1396 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1397 ionic_lif_deferred_enqueue(&lif->deferred, work);
1398 }
1399
ionic_netdev_features_to_nic(netdev_features_t features)1400 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1401 {
1402 u64 wanted = 0;
1403
1404 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1405 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1406 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1407 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1408 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1409 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1410 if (features & NETIF_F_RXHASH)
1411 wanted |= IONIC_ETH_HW_RX_HASH;
1412 if (features & NETIF_F_RXCSUM)
1413 wanted |= IONIC_ETH_HW_RX_CSUM;
1414 if (features & NETIF_F_SG)
1415 wanted |= IONIC_ETH_HW_TX_SG;
1416 if (features & NETIF_F_HW_CSUM)
1417 wanted |= IONIC_ETH_HW_TX_CSUM;
1418 if (features & NETIF_F_TSO)
1419 wanted |= IONIC_ETH_HW_TSO;
1420 if (features & NETIF_F_TSO6)
1421 wanted |= IONIC_ETH_HW_TSO_IPV6;
1422 if (features & NETIF_F_TSO_ECN)
1423 wanted |= IONIC_ETH_HW_TSO_ECN;
1424 if (features & NETIF_F_GSO_GRE)
1425 wanted |= IONIC_ETH_HW_TSO_GRE;
1426 if (features & NETIF_F_GSO_GRE_CSUM)
1427 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1428 if (features & NETIF_F_GSO_IPXIP4)
1429 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1430 if (features & NETIF_F_GSO_IPXIP6)
1431 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1432 if (features & NETIF_F_GSO_UDP_TUNNEL)
1433 wanted |= IONIC_ETH_HW_TSO_UDP;
1434 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1435 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1436
1437 return cpu_to_le64(wanted);
1438 }
1439
/* Ask the firmware to enable the requested features, record what the
 * device actually granted in lif->hw_features, and log the result.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	static const struct {
		u64 bit;
		const char *name;
	} feat_name[] = {
		{ IONIC_ETH_HW_VLAN_TX_TAG,	"ETH_HW_VLAN_TX_TAG" },
		{ IONIC_ETH_HW_VLAN_RX_STRIP,	"ETH_HW_VLAN_RX_STRIP" },
		{ IONIC_ETH_HW_VLAN_RX_FILTER,	"ETH_HW_VLAN_RX_FILTER" },
		{ IONIC_ETH_HW_RX_HASH,		"ETH_HW_RX_HASH" },
		{ IONIC_ETH_HW_TX_SG,		"ETH_HW_TX_SG" },
		{ IONIC_ETH_HW_TX_CSUM,		"ETH_HW_TX_CSUM" },
		{ IONIC_ETH_HW_RX_CSUM,		"ETH_HW_RX_CSUM" },
		{ IONIC_ETH_HW_TSO,		"ETH_HW_TSO" },
		{ IONIC_ETH_HW_TSO_IPV6,	"ETH_HW_TSO_IPV6" },
		{ IONIC_ETH_HW_TSO_ECN,		"ETH_HW_TSO_ECN" },
		{ IONIC_ETH_HW_TSO_GRE,		"ETH_HW_TSO_GRE" },
		{ IONIC_ETH_HW_TSO_GRE_CSUM,	"ETH_HW_TSO_GRE_CSUM" },
		{ IONIC_ETH_HW_TSO_IPXIP4,	"ETH_HW_TSO_IPXIP4" },
		{ IONIC_ETH_HW_TSO_IPXIP6,	"ETH_HW_TSO_IPXIP6" },
		{ IONIC_ETH_HW_TSO_UDP,		"ETH_HW_TSO_UDP" },
		{ IONIC_ETH_HW_TSO_UDP_CSUM,	"ETH_HW_TSO_UDP_CSUM" },
		{ IONIC_ETH_HW_TIMESTAMP,	"ETH_HW_TIMESTAMP" },
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 old_hw_features;
	unsigned int i;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	/* hw timestamping is controlled by the driver/PHC, not by ethtool */
	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* the device grants the intersection of requested and supported */
	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	for (i = 0; i < ARRAY_SIZE(feat_name); i++)
		if (lif->hw_features & feat_name[i].bit)
			dev_dbg(dev, "feature %s\n", feat_name[i].name);

	return 0;
}
1515
ionic_init_nic_features(struct ionic_lif * lif)1516 static int ionic_init_nic_features(struct ionic_lif *lif)
1517 {
1518 struct net_device *netdev = lif->netdev;
1519 netdev_features_t features;
1520 int err;
1521
1522 /* set up what we expect to support by default */
1523 features = NETIF_F_HW_VLAN_CTAG_TX |
1524 NETIF_F_HW_VLAN_CTAG_RX |
1525 NETIF_F_HW_VLAN_CTAG_FILTER |
1526 NETIF_F_SG |
1527 NETIF_F_HW_CSUM |
1528 NETIF_F_RXCSUM |
1529 NETIF_F_TSO |
1530 NETIF_F_TSO6 |
1531 NETIF_F_TSO_ECN;
1532
1533 if (lif->nxqs > 1)
1534 features |= NETIF_F_RXHASH;
1535
1536 err = ionic_set_nic_features(lif, features);
1537 if (err)
1538 return err;
1539
1540 /* tell the netdev what we actually can support */
1541 netdev->features |= NETIF_F_HIGHDMA;
1542
1543 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1544 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1545 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1546 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1547 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1548 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1549 if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1550 netdev->hw_features |= NETIF_F_RXHASH;
1551 if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1552 netdev->hw_features |= NETIF_F_SG;
1553
1554 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1555 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1556 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1557 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1558 if (lif->hw_features & IONIC_ETH_HW_TSO)
1559 netdev->hw_enc_features |= NETIF_F_TSO;
1560 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1561 netdev->hw_enc_features |= NETIF_F_TSO6;
1562 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1563 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1564 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1565 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1566 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1567 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1568 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1569 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1570 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1571 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1572 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1573 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1574 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1575 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1576
1577 netdev->hw_features |= netdev->hw_enc_features;
1578 netdev->features |= netdev->hw_features;
1579 netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
1580
1581 netdev->priv_flags |= IFF_UNICAST_FLT |
1582 IFF_LIVE_ADDR_CHANGE;
1583
1584 return 0;
1585 }
1586
/* ndo_set_features callback: forward the new feature set to the NIC. */
static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	return ionic_set_nic_features(lif, features);
}
1600
/* Push a station MAC address to the firmware via LIF_SETATTR. */
static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);

	return ionic_adminq_post_wait(lif, &ctx);
}
1615
/* Read back the station MAC address from the firmware into mac_addr. */
static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (!err)
		ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);

	return err;
}
1635
/* Program the station MAC and verify it took effect.
 *
 * Returns 0 on success, a negative errno on failure, or 1 when the
 * firmware silently ignored the set (older firmware does not change
 * the mac and does not return an error, so a get-attr read-back is
 * the only way to detect it).
 */
static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 readback[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, readback);
	if (err)
		return err;

	return ether_addr_equal(readback, mac) ? 0 : 1;
}
1658
ionic_set_mac_address(struct net_device * netdev,void * sa)1659 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1660 {
1661 struct ionic_lif *lif = netdev_priv(netdev);
1662 struct sockaddr *addr = sa;
1663 u8 *mac;
1664 int err;
1665
1666 mac = (u8 *)addr->sa_data;
1667 if (ether_addr_equal(netdev->dev_addr, mac))
1668 return 0;
1669
1670 err = ionic_program_mac(lif, mac);
1671 if (err < 0)
1672 return err;
1673
1674 if (err > 0)
1675 netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
1676 __func__);
1677
1678 err = eth_prepare_mac_addr_change(netdev, addr);
1679 if (err)
1680 return err;
1681
1682 if (!is_zero_ether_addr(netdev->dev_addr)) {
1683 netdev_info(netdev, "deleting mac addr %pM\n",
1684 netdev->dev_addr);
1685 ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
1686 }
1687
1688 eth_commit_mac_addr_change(netdev, addr);
1689 netdev_info(netdev, "updating mac addr %pM\n", mac);
1690
1691 return ionic_lif_addr_add(netdev_priv(netdev), mac);
1692 }
1693
ionic_stop_queues_reconfig(struct ionic_lif * lif)1694 static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1695 {
1696 /* Stop and clean the queues before reconfiguration */
1697 netif_device_detach(lif->netdev);
1698 ionic_stop_queues(lif);
1699 ionic_txrx_deinit(lif);
1700 }
1701
ionic_start_queues_reconfig(struct ionic_lif * lif)1702 static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1703 {
1704 int err;
1705
1706 /* Re-init the queues after reconfiguration */
1707
1708 /* The only way txrx_init can fail here is if communication
1709 * with FW is suddenly broken. There's not much we can do
1710 * at this point - error messages have already been printed,
1711 * so we can continue on and the user can eventually do a
1712 * DOWN and UP to try to reset and clear the issue.
1713 */
1714 err = ionic_txrx_init(lif);
1715 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1716 netif_device_attach(lif->netdev);
1717
1718 return err;
1719 }
1720
ionic_change_mtu(struct net_device * netdev,int new_mtu)1721 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1722 {
1723 struct ionic_lif *lif = netdev_priv(netdev);
1724 struct ionic_admin_ctx ctx = {
1725 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1726 .cmd.lif_setattr = {
1727 .opcode = IONIC_CMD_LIF_SETATTR,
1728 .index = cpu_to_le16(lif->index),
1729 .attr = IONIC_LIF_ATTR_MTU,
1730 .mtu = cpu_to_le32(new_mtu),
1731 },
1732 };
1733 int err;
1734
1735 err = ionic_adminq_post_wait(lif, &ctx);
1736 if (err)
1737 return err;
1738
1739 /* if we're not running, nothing more to do */
1740 if (!netif_running(netdev)) {
1741 netdev->mtu = new_mtu;
1742 return 0;
1743 }
1744
1745 mutex_lock(&lif->queue_lock);
1746 ionic_stop_queues_reconfig(lif);
1747 netdev->mtu = new_mtu;
1748 err = ionic_start_queues_reconfig(lif);
1749 mutex_unlock(&lif->queue_lock);
1750
1751 return err;
1752 }
1753
ionic_tx_timeout_work(struct work_struct * ws)1754 static void ionic_tx_timeout_work(struct work_struct *ws)
1755 {
1756 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1757
1758 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1759 return;
1760
1761 /* if we were stopped before this scheduled job was launched,
1762 * don't bother the queues as they are already stopped.
1763 */
1764 if (!netif_running(lif->netdev))
1765 return;
1766
1767 mutex_lock(&lif->queue_lock);
1768 ionic_stop_queues_reconfig(lif);
1769 ionic_start_queues_reconfig(lif);
1770 mutex_unlock(&lif->queue_lock);
1771 }
1772
ionic_tx_timeout(struct net_device * netdev,unsigned int txqueue)1773 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1774 {
1775 struct ionic_lif *lif = netdev_priv(netdev);
1776
1777 netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1778 schedule_work(&lif->tx_timeout_work);
1779 }
1780
/* ndo_vlan_rx_add_vid callback: add the vlan filter, then recompute
 * the rx mode in case the filter table just overflowed.
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_add(lif, vid);
	if (!err)
		ionic_lif_rx_mode(lif);

	return err;
}
1795
/* ndo_vlan_rx_kill_vid callback: remove the vlan filter, then recompute
 * the rx mode in case promisc overflow can now be cleared.
 */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_del(lif, vid);
	if (!err)
		ionic_lif_rx_mode(lif);

	return err;
}
1810
ionic_lif_rss_config(struct ionic_lif * lif,const u16 types,const u8 * key,const u32 * indir)1811 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1812 const u8 *key, const u32 *indir)
1813 {
1814 struct ionic_admin_ctx ctx = {
1815 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1816 .cmd.lif_setattr = {
1817 .opcode = IONIC_CMD_LIF_SETATTR,
1818 .attr = IONIC_LIF_ATTR_RSS,
1819 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1820 },
1821 };
1822 unsigned int i, tbl_sz;
1823
1824 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1825 lif->rss_types = types;
1826 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1827 }
1828
1829 if (key)
1830 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1831
1832 if (indir) {
1833 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1834 for (i = 0; i < tbl_sz; i++)
1835 lif->rss_ind_tbl[i] = indir[i];
1836 }
1837
1838 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1839 IONIC_RSS_HASH_KEY_SIZE);
1840
1841 return ionic_adminq_post_wait(lif, &ctx);
1842 }
1843
ionic_lif_rss_init(struct ionic_lif * lif)1844 static int ionic_lif_rss_init(struct ionic_lif *lif)
1845 {
1846 unsigned int tbl_sz;
1847 unsigned int i;
1848
1849 lif->rss_types = IONIC_RSS_TYPE_IPV4 |
1850 IONIC_RSS_TYPE_IPV4_TCP |
1851 IONIC_RSS_TYPE_IPV4_UDP |
1852 IONIC_RSS_TYPE_IPV6 |
1853 IONIC_RSS_TYPE_IPV6_TCP |
1854 IONIC_RSS_TYPE_IPV6_UDP;
1855
1856 /* Fill indirection table with 'default' values */
1857 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1858 for (i = 0; i < tbl_sz; i++)
1859 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1860
1861 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1862 }
1863
ionic_lif_rss_deinit(struct ionic_lif * lif)1864 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1865 {
1866 int tbl_sz;
1867
1868 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1869 memset(lif->rss_ind_tbl, 0, tbl_sz);
1870 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1871
1872 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1873 }
1874
ionic_lif_quiesce(struct ionic_lif * lif)1875 static void ionic_lif_quiesce(struct ionic_lif *lif)
1876 {
1877 struct ionic_admin_ctx ctx = {
1878 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1879 .cmd.lif_setattr = {
1880 .opcode = IONIC_CMD_LIF_SETATTR,
1881 .index = cpu_to_le16(lif->index),
1882 .attr = IONIC_LIF_ATTR_STATE,
1883 .state = IONIC_LIF_QUIESCE,
1884 },
1885 };
1886 int err;
1887
1888 err = ionic_adminq_post_wait(lif, &ctx);
1889 if (err)
1890 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
1891 }
1892
ionic_txrx_disable(struct ionic_lif * lif)1893 static void ionic_txrx_disable(struct ionic_lif *lif)
1894 {
1895 unsigned int i;
1896 int err = 0;
1897
1898 if (lif->txqcqs) {
1899 for (i = 0; i < lif->nxqs; i++)
1900 err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
1901 }
1902
1903 if (lif->hwstamp_txq)
1904 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
1905
1906 if (lif->rxqcqs) {
1907 for (i = 0; i < lif->nxqs; i++)
1908 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
1909 }
1910
1911 if (lif->hwstamp_rxq)
1912 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
1913
1914 ionic_lif_quiesce(lif);
1915 }
1916
ionic_txrx_deinit(struct ionic_lif * lif)1917 static void ionic_txrx_deinit(struct ionic_lif *lif)
1918 {
1919 unsigned int i;
1920
1921 if (lif->txqcqs) {
1922 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1923 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1924 ionic_tx_flush(&lif->txqcqs[i]->cq);
1925 ionic_tx_empty(&lif->txqcqs[i]->q);
1926 }
1927 }
1928
1929 if (lif->rxqcqs) {
1930 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1931 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1932 ionic_rx_empty(&lif->rxqcqs[i]->q);
1933 }
1934 }
1935 lif->rx_mode = 0;
1936
1937 if (lif->hwstamp_txq) {
1938 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
1939 ionic_tx_flush(&lif->hwstamp_txq->cq);
1940 ionic_tx_empty(&lif->hwstamp_txq->q);
1941 }
1942
1943 if (lif->hwstamp_rxq) {
1944 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
1945 ionic_rx_empty(&lif->hwstamp_rxq->q);
1946 }
1947 }
1948
ionic_txrx_free(struct ionic_lif * lif)1949 static void ionic_txrx_free(struct ionic_lif *lif)
1950 {
1951 unsigned int i;
1952
1953 if (lif->txqcqs) {
1954 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
1955 ionic_qcq_free(lif, lif->txqcqs[i]);
1956 devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
1957 lif->txqcqs[i] = NULL;
1958 }
1959 }
1960
1961 if (lif->rxqcqs) {
1962 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
1963 ionic_qcq_free(lif, lif->rxqcqs[i]);
1964 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
1965 lif->rxqcqs[i] = NULL;
1966 }
1967 }
1968
1969 if (lif->hwstamp_txq) {
1970 ionic_qcq_free(lif, lif->hwstamp_txq);
1971 devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
1972 lif->hwstamp_txq = NULL;
1973 }
1974
1975 if (lif->hwstamp_rxq) {
1976 ionic_qcq_free(lif, lif->hwstamp_rxq);
1977 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
1978 lif->hwstamp_rxq = NULL;
1979 }
1980 }
1981
/* Allocate the tx and rx qcq pairs for the data path.
 *
 * Tx queues get their own interrupt only in split-interrupt mode;
 * otherwise each tx queue is serviced from its partner rx queue's
 * interrupt.  On any failure everything allocated so far is released
 * through ionic_txrx_free().
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	unsigned int flags, i;
	int err = 0;

	num_desc = lif->ntxq_descs;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = sizeof(struct ionic_txq_comp);

	/* use the v1 SG descriptor layout when the device's TXQ qtype
	 * supports it and advertises the matching descriptor size
	 */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		/* only split-interrupt tx queues have their own coalescing */
		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;

	num_desc = lif->nrxq_descs;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	/* some rxq features request double-wide completion descriptors */
	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
		comp_sz *= 2;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		lif->rxqcqs[i]->q.features = lif->rxq_features;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		/* shared-interrupt mode: the tx queue rides on the rx
		 * queue's interrupt
		 */
		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}
2059
/* Initialize the previously allocated tx/rx queue pairs with the
 * device, then set up RSS (if enabled on the netdev) and push the
 * current rx filter mode.  On failure, any pairs already initialized
 * are deinitialized again.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			/* undo this pair's txq init before unwinding the
			 * fully initialized pairs below
			 */
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_lif_rx_mode(lif);

	return 0;

err_out:
	/* tear down the pairs that were completely initialized */
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}
2092
/* Fill and enable all tx/rx queue pairs, plus the optional hardware
 * timestamping queues.  On failure, every queue already enabled is
 * disabled again; disable results are accumulated in derr, but the
 * original enable error is what gets returned.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		/* post rx buffers before enabling the queue */
		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			/* back out this pair's rx enable before unwinding */
			derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
			goto err_out;
		}
	}

	if (lif->hwstamp_rxq) {
		ionic_rx_fill(&lif->hwstamp_rxq->q);
		err = ionic_qcq_enable(lif->hwstamp_rxq);
		if (err)
			goto err_out_hwstamp_rx;
	}

	if (lif->hwstamp_txq) {
		err = ionic_qcq_enable(lif->hwstamp_txq);
		if (err)
			goto err_out_hwstamp_tx;
	}

	return 0;

err_out_hwstamp_tx:
	if (lif->hwstamp_rxq)
		derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
err_out_hwstamp_rx:
	i = lif->nxqs;	/* unwind every regular queue pair */
err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
		derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
	}

	return err;
}
2145
/* Mark the LIF up and enable the data path.
 *
 * Refuses to start while the LIF is broken (-EIO) or a firmware reset
 * is in progress (-EBUSY); returns 0 immediately if already up.  The
 * UP bit is set before enabling and rolled back on failure.
 */
static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
		return -EIO;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	/* already up - nothing to do */
	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}
2168
/* ndo_open: allocate and initialize the data path queues under the
 * queue lock.  The queues are only started if the carrier is already
 * up; otherwise they stay idle until link comes up.
 *
 * Return: 0 on success, negative errno on failure (with everything
 * allocated here torn down again).
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	/* If recovering from a broken state, clear the bit and we'll try again */
	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		netdev_info(netdev, "clearing broken state\n");

	mutex_lock(&lif->queue_lock);

	err = ionic_txrx_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	/* If hardware timestamping is enabled, but the queues were freed by
	 * ionic_stop, those need to be reallocated and initialized, too.
	 */
	ionic_lif_hwstamp_recreate_queues(lif);

	mutex_unlock(&lif->queue_lock);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
	return err;
}
2220
ionic_stop_queues(struct ionic_lif * lif)2221 static void ionic_stop_queues(struct ionic_lif *lif)
2222 {
2223 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
2224 return;
2225
2226 netif_tx_disable(lif->netdev);
2227 ionic_txrx_disable(lif);
2228 }
2229
/* ndo_stop: stop and release the data path queues.
 *
 * If a firmware reset is in progress the queues were already torn
 * down by the FW-down handling, so there is nothing to do here.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);
	mutex_unlock(&lif->queue_lock);

	return 0;
}
2245
ionic_eth_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)2246 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2247 {
2248 struct ionic_lif *lif = netdev_priv(netdev);
2249
2250 switch (cmd) {
2251 case SIOCSHWTSTAMP:
2252 return ionic_lif_hwstamp_set(lif, ifr);
2253 case SIOCGHWTSTAMP:
2254 return ionic_lif_hwstamp_get(lif, ifr);
2255 default:
2256 return -EOPNOTSUPP;
2257 }
2258 }
2259
/* Refresh the locally cached attributes of one VF from the device,
 * one getattr command per attribute.
 *
 * An attribute the firmware doesn't support (IONIC_RC_ENOSUPP) is
 * silently skipped; any other failure aborts the refresh and is
 * logged along with the attribute that failed.
 *
 * Return: 0 on success (or only unsupported attrs), else the error
 * from the failing getattr.
 */
static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
{
	struct ionic_vf_getattr_comp comp = { 0 };
	int err;
	u8 attr;

	attr = IONIC_VF_ATTR_VLAN;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].vlanid = comp.vlanid;

	attr = IONIC_VF_ATTR_SPOOFCHK;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].spoofchk = comp.spoofchk;

	attr = IONIC_VF_ATTR_LINKSTATE;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err) {
		/* translate device link status into IFLA link state */
		switch (comp.linkstate) {
		case IONIC_VF_LINK_STATUS_UP:
			ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_ENABLE;
			break;
		case IONIC_VF_LINK_STATUS_DOWN:
			ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_DISABLE;
			break;
		case IONIC_VF_LINK_STATUS_AUTO:
			ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_AUTO;
			break;
		default:
			dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
			break;
		}
	}

	attr = IONIC_VF_ATTR_RATE;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].maxrate = comp.maxrate;

	attr = IONIC_VF_ATTR_TRUST;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].trusted = comp.trust;

	attr = IONIC_VF_ATTR_MAC;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ether_addr_copy(ionic->vfs[vf].macaddr, comp.macaddr);

err_out:
	if (err)
		dev_err(ionic->dev, "Failed to get %s for VF %d\n",
			ionic_vf_attr_to_str(attr), vf);

	return err;
}
2329
ionic_get_vf_config(struct net_device * netdev,int vf,struct ifla_vf_info * ivf)2330 static int ionic_get_vf_config(struct net_device *netdev,
2331 int vf, struct ifla_vf_info *ivf)
2332 {
2333 struct ionic_lif *lif = netdev_priv(netdev);
2334 struct ionic *ionic = lif->ionic;
2335 int ret = 0;
2336
2337 if (!netif_device_present(netdev))
2338 return -EBUSY;
2339
2340 down_read(&ionic->vf_op_lock);
2341
2342 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2343 ret = -EINVAL;
2344 } else {
2345 ivf->vf = vf;
2346 ivf->qos = 0;
2347
2348 ret = ionic_update_cached_vf_config(ionic, vf);
2349 if (!ret) {
2350 ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
2351 ivf->spoofchk = ionic->vfs[vf].spoofchk;
2352 ivf->linkstate = ionic->vfs[vf].linkstate;
2353 ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
2354 ivf->trusted = ionic->vfs[vf].trusted;
2355 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
2356 }
2357 }
2358
2359 up_read(&ionic->vf_op_lock);
2360 return ret;
2361 }
2362
ionic_get_vf_stats(struct net_device * netdev,int vf,struct ifla_vf_stats * vf_stats)2363 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2364 struct ifla_vf_stats *vf_stats)
2365 {
2366 struct ionic_lif *lif = netdev_priv(netdev);
2367 struct ionic *ionic = lif->ionic;
2368 struct ionic_lif_stats *vs;
2369 int ret = 0;
2370
2371 if (!netif_device_present(netdev))
2372 return -EBUSY;
2373
2374 down_read(&ionic->vf_op_lock);
2375
2376 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2377 ret = -EINVAL;
2378 } else {
2379 memset(vf_stats, 0, sizeof(*vf_stats));
2380 vs = &ionic->vfs[vf].stats;
2381
2382 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2383 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2384 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
2385 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
2386 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
2387 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
2388 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2389 le64_to_cpu(vs->rx_mcast_drop_packets) +
2390 le64_to_cpu(vs->rx_bcast_drop_packets);
2391 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2392 le64_to_cpu(vs->tx_mcast_drop_packets) +
2393 le64_to_cpu(vs->tx_bcast_drop_packets);
2394 }
2395
2396 up_read(&ionic->vf_op_lock);
2397 return ret;
2398 }
2399
/* ndo_set_vf_mac: program a VF's MAC address.  Accepts either a valid
 * unicast address or all-zeros (to clear); caches the value locally
 * when the device accepts it.
 */
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (ionic->vfs && vf < pci_num_vf(ionic->pdev)) {
		ether_addr_copy(vfc.macaddr, mac);
		dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
			__func__, vf, vfc.macaddr);

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
2430
/* ndo_set_vf_vlan: assign a port VLAN to a VF.  QoS is not supported
 * and only 802.1Q tagging is accepted.
 */
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (ionic->vfs && vf < pci_num_vf(ionic->pdev)) {
		vfc.vlanid = cpu_to_le16(vlan);
		dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
			__func__, vf, le16_to_cpu(vfc.vlanid));

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
2469
ionic_set_vf_rate(struct net_device * netdev,int vf,int tx_min,int tx_max)2470 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2471 int tx_min, int tx_max)
2472 {
2473 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
2474 struct ionic_lif *lif = netdev_priv(netdev);
2475 struct ionic *ionic = lif->ionic;
2476 int ret;
2477
2478 /* setting the min just seems silly */
2479 if (tx_min)
2480 return -EINVAL;
2481
2482 if (!netif_device_present(netdev))
2483 return -EBUSY;
2484
2485 down_write(&ionic->vf_op_lock);
2486
2487 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2488 ret = -EINVAL;
2489 } else {
2490 vfc.maxrate = cpu_to_le32(tx_max);
2491 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
2492 __func__, vf, le32_to_cpu(vfc.maxrate));
2493
2494 ret = ionic_set_vf_config(ionic, vf, &vfc);
2495 if (!ret)
2496 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2497 }
2498
2499 up_write(&ionic->vf_op_lock);
2500 return ret;
2501 }
2502
/* ndo_set_vf_spoofchk: enable/disable MAC anti-spoof checking on a
 * VF, caching the setting locally on success.
 */
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (ionic->vfs && vf < pci_num_vf(ionic->pdev)) {
		vfc.spoofchk = set;
		dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
			__func__, vf, vfc.spoofchk);

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].spoofchk = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
2530
/* ndo_set_vf_trust: mark a VF as trusted/untrusted, caching the
 * setting locally on success.
 */
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (ionic->vfs && vf < pci_num_vf(ionic->pdev)) {
		vfc.trust = set;
		dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
			__func__, vf, vfc.trust);

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].trusted = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
2558
ionic_set_vf_link_state(struct net_device * netdev,int vf,int set)2559 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2560 {
2561 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
2562 struct ionic_lif *lif = netdev_priv(netdev);
2563 struct ionic *ionic = lif->ionic;
2564 u8 vfls;
2565 int ret;
2566
2567 switch (set) {
2568 case IFLA_VF_LINK_STATE_ENABLE:
2569 vfls = IONIC_VF_LINK_STATUS_UP;
2570 break;
2571 case IFLA_VF_LINK_STATE_DISABLE:
2572 vfls = IONIC_VF_LINK_STATUS_DOWN;
2573 break;
2574 case IFLA_VF_LINK_STATE_AUTO:
2575 vfls = IONIC_VF_LINK_STATUS_AUTO;
2576 break;
2577 default:
2578 return -EINVAL;
2579 }
2580
2581 if (!netif_device_present(netdev))
2582 return -EBUSY;
2583
2584 down_write(&ionic->vf_op_lock);
2585
2586 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2587 ret = -EINVAL;
2588 } else {
2589 vfc.linkstate = vfls;
2590 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
2591 __func__, vf, vfc.linkstate);
2592
2593 ret = ionic_set_vf_config(ionic, vf, &vfc);
2594 if (!ret)
2595 ionic->vfs[vf].linkstate = set;
2596 }
2597
2598 up_write(&ionic->vf_op_lock);
2599 return ret;
2600 }
2601
/* Standard netdev callbacks for the ionic interface, including the
 * SR-IOV VF management ops.
 */
static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_eth_ioctl		= ionic_eth_ioctl,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};
2625
/* Exchange the descriptor rings, DMA buffers, and sizing fields of
 * two qcqs, leaving interrupt/napi wiring untouched.  Used by queue
 * reconfiguration to splice newly allocated rings into the live qcq
 * shells.  The debugfs entry for 'a' is refreshed afterwards since
 * its contents changed.
 */
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.features,   b->q.features);
	swap(a->q.num_descs,  b->q.num_descs);
	swap(a->q.desc_size,  b->q.desc_size);
	swap(a->q.base,       b->q.base);
	swap(a->q.base_pa,    b->q.base_pa);
	swap(a->q.info,       b->q.info);
	swap(a->q_base,       b->q_base);
	swap(a->q_base_pa,    b->q_base_pa);
	swap(a->q_size,       b->q_size);

	swap(a->q.sg_desc_size, b->q.sg_desc_size);
	swap(a->q.sg_base,    b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base,      b->sg_base);
	swap(a->sg_base_pa,   b->sg_base_pa);
	swap(a->sg_size,      b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.desc_size, b->cq.desc_size);
	swap(a->cq.base,      b->cq.base);
	swap(a->cq.base_pa,   b->cq.base_pa);
	swap(a->cq.info,      b->cq.info);
	swap(a->cq_base,      b->cq_base);
	swap(a->cq_base_pa,   b->cq_base_pa);
	swap(a->cq_size,      b->cq_size);

	ionic_debugfs_del_qcq(a);
	ionic_debugfs_add_qcq(a->q.lif, a);
}
2658
/* Apply a new queue configuration (queue count, ring sizes, rx
 * features, interrupt split) with minimal downtime.
 *
 * Strategy: allocate replacement rings up front while the old queues
 * are still running, then stop the queues, swap the new rings into
 * the existing qcq shells, rework the interrupt layout if needed,
 * and restart.  On failure the queues are restarted with whatever
 * configuration is in place, and the original error is returned.
 *
 * Must be called with the queues stopped-capable context; uses
 * ionic_stop_queues_reconfig()/ionic_start_queues_reconfig() to
 * bracket the swap.
 */
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int flags, i;
	int err = 0;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}
	if (qparam->nxqs != lif->nxqs ||
	    qparam->nrxq_descs != lif->nrxq_descs ||
	    qparam->rxq_features != lif->rxq_features) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (tx_qcqs) {
		num_desc = qparam->ntxq_descs;
		desc_sz = sizeof(struct ionic_txq_desc);
		comp_sz = sizeof(struct ionic_txq_comp);

		/* same v1 SG descriptor selection as ionic_txrx_alloc() */
		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
		    sizeof(struct ionic_txq_sg_desc_v1))
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
		else
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		num_desc = qparam->nrxq_descs;
		desc_sz = sizeof(struct ionic_rxq_desc);
		comp_sz = sizeof(struct ionic_rxq_comp);
		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

		/* some rxq features request double-wide completions */
		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
			comp_sz *= 2;

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;

			rx_qcqs[i]->q.features = qparam->rxq_features;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			/* shared interrupts use the rx coalesce settings */
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			/* NOTE(review): err is not checked until the restart
			 * below, where it is preserved and returned - confirm
			 * an early abort isn't needed on alloc failure
			 */
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);
	swap(lif->rxq_features, qparam->rxq_features);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		if (lif->txqcqs && lif->txqcqs[i]) {
			lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, lif->txqcqs[i]);
		}

		if (lif->rxqcqs && lif->rxqcqs[i]) {
			lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, lif->rxqcqs[i]);
		}
	}

	if (err)
		netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);

	return err;
}
2873
/* Allocate and set up the LIF: the netdev, identity info, coalesce
 * defaults, locks, DMA-coherent info block, control/txrx queue
 * structures, and the RSS indirection table.
 *
 * All resources acquired here are released in reverse order on
 * failure via the goto-unwind chain at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	/* NOTE(review): the tx queue count is used for both the tx and rx
	 * subqueue counts here - the driver sizes them symmetrically
	 */
	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	/* derive the netdev MTU limits from the device's frame size limits */
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;

	/* minimal ring sizes when running as a kdump capture kernel */
	if (is_kdump_kernel()) {
		lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
		lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
	} else {
		lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
		lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	}

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	mutex_init(&lif->queue_lock);
	mutex_init(&lif->config_lock);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_mutex;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_alloc_phc(lif);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_mutex:
	mutex_destroy(&lif->config_lock);
	mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}
3006
/* Issue a device command to reset this LIF.  Skipped entirely if the
 * firmware is not running, since there is nothing to talk to.
 */
static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!ionic_is_fw_running(idev))
		return;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}
3019
/* Handle a firmware-down event: detach the netdev, stop and free the
 * data path, and deinit the LIF so it can be rebuilt once the FW
 * returns (see ionic_lif_handle_fw_up()).
 */
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	/* FW reset already in progress - nothing more to do */
	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	mutex_lock(&lif->queue_lock);
	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		ionic_stop_queues(lif);
	}

	/* only deinit/free the data path if the interface was open */
	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	mutex_unlock(&lif->queue_lock);

	clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
3050
/* Handle a firmware-up event: re-identify the device, rebuild the
 * LIF state that was torn down in ionic_lif_handle_fw_down(), replay
 * the rx filters, and - if the interface was open - reallocate and
 * reinit the data path before reattaching the netdev.
 */
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	/* only meaningful if a FW reset was in progress */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	/* re-discover the device before rebuilding anything on it */
	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;

	mutex_lock(&lif->queue_lock);

	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		dev_info(ionic->dev, "FW Up: clearing broken state\n");

	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	/* rebuild the data path only if the interface was open */
	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	mutex_unlock(&lif->queue_lock);

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	/* restore the hardware timestamping queues */
	ionic_lif_hwstamp_replay(lif);

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
3123
/* Final release of everything held by the LIF: PHC, RSS indirection
 * table, queue structures, identity/info DMA buffers, doorbell mapping,
 * mutexes, debugfs entries, and the netdev itself.
 */
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	ionic_lif_free_phc(lif);

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	/* skip the device-side reset while a FW recovery is in progress */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;

	mutex_destroy(&lif->config_lock);
	mutex_destroy(&lif->queue_lock);

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}
3158
/* Undo ionic_lif_init().
 *
 * During FW recovery (IONIC_LIF_F_FW_RESET set) the rx filter and RSS
 * state is deliberately left in place so it can be replayed when the FW
 * comes back; otherwise it is torn down here.
 */
void ionic_lif_deinit(struct ionic_lif *lif)
{
	/* only deinit a LIF that was actually inited, and only once */
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	/* quiesce and tear down the control queues, then reset the LIF */
	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	ionic_lif_reset(lif);
}
3178
/* Initialize the adminq with the device via a dev command (the adminq
 * itself is not usable yet), then set up its NAPI context and enable
 * its interrupt.
 */
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	/* dev commands share one register window; serialize access */
	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	/* record the hw identity the device assigned to this queue */
	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	/* start the doorbell-deadline tracking from "now" */
	q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);

	qcq->napi_qcq = qcq;
	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);

	napi_enable(&qcq->napi);

	/* unmask (enable) the adminq interrupt if one was assigned */
	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
3227
/* Initialize the notifyq via an adminq command (so the adminq must
 * already be up).  The notifyq has no interrupt of its own: intr_index
 * points at the adminq's vector, so notifyq events are serviced by the
 * adminq NAPI.
 */
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			/* share the adminq's interrupt */
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* restart event-id tracking and record the assigned hw identity */
	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
3276
/* Establish the station (primary) MAC address for this LIF.
 *
 * Reads the device's default MAC via a LIF_GETATTR adminq command.  If
 * the device has none (all zeros), a random MAC is generated and
 * programmed back to the device.  Finally the address is synced between
 * the netdev and the device's filter list.
 */
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	u8 mac_address[ETH_ALEN];
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);

	if (is_zero_ether_addr(mac_address)) {
		/* device has no MAC for us - make one up and program it */
		eth_hw_addr_random(netdev);
		netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
		ether_addr_copy(mac_address, netdev->dev_addr);

		err = ionic_program_mac(lif, mac_address);
		if (err < 0)
			return err;

		/* err > 0 means older FW couldn't confirm the set; treat as done */
		if (err > 0) {
			netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
				   __func__);
			return 0;
		}
	}

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(mac_address, netdev->dev_addr))
			ionic_lif_addr_add(lif, netdev->dev_addr);
	} else {
		/* Update the netdev mac with the device's mac */
		ether_addr_copy(addr.sa_data, mac_address);
		/* NOTE(review): sa_family here is AF_INET, not an L2 family;
		 * looks odd but is the driver's long-standing usage - confirm
		 * before changing.
		 */
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr_add(lif, netdev->dev_addr);

	return 0;
}
3343
/* Initialize the LIF with the device and bring up its control path:
 * dev-level LIF_INIT, doorbell page mapping, adminq, optional notifyq,
 * NIC features, rx filters, and the station MAC address.  Rx filter
 * init is skipped during FW recovery since the saved filters will be
 * replayed by the FW-up handler instead.
 */
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	/* the kernel's own queues use doorbell pid 0 */
	lif->kern_pid = 0;
	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		return -ENOMEM;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/* notifyq is optional; only init it if the device gave us any */
	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	/* during FW recovery the filters are replayed, not re-inited */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

	/* unwind: quiesce NAPI, deinit queues, reset and unmap */
err_out_notifyq_deinit:
	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;

	return err;
}
3420
/* Intentionally empty work handler; the nb_work item is INIT_WORK'd at
 * register time and cancel_work_sync'd at unregister, but currently has
 * nothing to do.
 */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
3424
/* Push the netdev's current name to the device via LIF_SETATTR so the
 * device can associate the LIF with its interface name.  Best-effort:
 * the adminq result is not checked.
 */
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}
3441
ionic_netdev_lif(struct net_device * netdev)3442 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3443 {
3444 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3445 return NULL;
3446
3447 return netdev_priv(netdev);
3448 }
3449
ionic_lif_notify(struct notifier_block * nb,unsigned long event,void * info)3450 static int ionic_lif_notify(struct notifier_block *nb,
3451 unsigned long event, void *info)
3452 {
3453 struct net_device *ndev = netdev_notifier_info_to_dev(info);
3454 struct ionic *ionic = container_of(nb, struct ionic, nb);
3455 struct ionic_lif *lif = ionic_netdev_lif(ndev);
3456
3457 if (!lif || lif->ionic != ionic)
3458 return NOTIFY_DONE;
3459
3460 switch (event) {
3461 case NETDEV_CHANGENAME:
3462 ionic_lif_set_netdev_info(lif);
3463 break;
3464 }
3465
3466 return NOTIFY_DONE;
3467 }
3468
ionic_lif_register(struct ionic_lif * lif)3469 int ionic_lif_register(struct ionic_lif *lif)
3470 {
3471 int err;
3472
3473 ionic_lif_register_phc(lif);
3474
3475 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
3476
3477 lif->ionic->nb.notifier_call = ionic_lif_notify;
3478
3479 err = register_netdevice_notifier(&lif->ionic->nb);
3480 if (err)
3481 lif->ionic->nb.notifier_call = NULL;
3482
3483 /* only register LIF0 for now */
3484 err = register_netdev(lif->netdev);
3485 if (err) {
3486 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
3487 ionic_lif_unregister_phc(lif);
3488 return err;
3489 }
3490
3491 ionic_link_status_check_request(lif, CAN_SLEEP);
3492 lif->registered = true;
3493 ionic_lif_set_netdev_info(lif);
3494
3495 return 0;
3496 }
3497
ionic_lif_unregister(struct ionic_lif * lif)3498 void ionic_lif_unregister(struct ionic_lif *lif)
3499 {
3500 if (lif->ionic->nb.notifier_call) {
3501 unregister_netdevice_notifier(&lif->ionic->nb);
3502 cancel_work_sync(&lif->ionic->nb_work);
3503 lif->ionic->nb.notifier_call = NULL;
3504 }
3505
3506 if (lif->netdev->reg_state == NETREG_REGISTERED)
3507 unregister_netdev(lif->netdev);
3508
3509 ionic_lif_unregister_phc(lif);
3510
3511 lif->registered = false;
3512 }
3513
/* Query the device for the capabilities of each queue type we know
 * about, caching the results in lif->qtype_info[].  The identify data
 * is read directly out of the dev command register window (MMIO), so
 * read*() accessors are used.
 */
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		/* ask for the highest version we support for this qtype */
		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version   = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features  = readq(&q_ident->features);
			qti->desc_sz   = readw(&q_ident->desc_sz);
			qti->comp_sz   = readw(&q_ident->comp_sz);
			qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		/* -EINVAL: just this qtype unsupported, try the next one;
		 * -EIO: FW too old for q_ident at all, give up entirely
		 */
		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
3587
/* Fetch the LIF identity data from the device.
 * @ionic:    driver instance
 * @lif_type: which LIF type to identify
 * @lid:      destination buffer for the identity data
 *
 * Issues a LIF_IDENTIFY dev command and copies the result out of the
 * dev command register window.  Returns 0 on success or the negative
 * error from the dev command.  (Also drops the non-idiomatic
 * "return (err);" parenthesization.)
 */
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	/* never copy more than the dev cmd data window can hold */
	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}
3629
/* Work out how many queues and interrupt vectors this device will use.
 *
 * Starts from the FW-advertised limits (hard-clamped under kdump), then
 * repeatedly halves the notifyq, EQ, and TxRx counts until the OS grants
 * enough MSI vectors, failing with -ENOSPC once everything is down to 1.
 */
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* retrieve basic values from FW */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* limit values to play nice with kdump */
	if (is_kdump_kernel()) {
		dev_nintrs = 2;
		neqs_per_lif = 0;
		nnqs_per_lif = 0;
		ntxqs_per_lif = 1;
		nrxqs_per_lif = 1;
	}

	/* reserve last queue id for hardware timestamping; if there aren't
	 * enough queues to spare one, drop the HW timestamp feature instead
	 */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	/* queue pairs are limited by the smaller direction and by CPU count */
	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2; /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	/* a partial grant isn't usable; give them back and shrink the ask */
	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

	/* halve one knob at a time: notifyqs first, then EQs, then TxRx */
try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}
3725