1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/ethtool.h>
5 #include <linux/printk.h>
6 #include <linux/dynamic_debug.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/cpumask.h>
14 #include <linux/crash_dump.h>
15
16 #include "ionic.h"
17 #include "ionic_bus.h"
18 #include "ionic_lif.h"
19 #include "ionic_txrx.h"
20 #include "ionic_ethtool.h"
21 #include "ionic_debugfs.h"
22
/* queuetype support level
 *
 * For each queue type, the highest firmware queue-type version this
 * driver supports.  NOTE(review): presumably consumed during queue
 * identification (ionic_lif_queue_identify) -- confirm against that
 * function, which is outside this chunk.
 */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ] = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ] = 0,		/* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ] = 1,		/* 0 = Base version with CQ+SG support
					 * 1 = ... with Tx SG version 1
					 */
};
32
33 static void ionic_link_status_check(struct ionic_lif *lif);
34 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
35 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
36 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
37
38 static void ionic_txrx_deinit(struct ionic_lif *lif);
39 static int ionic_txrx_init(struct ionic_lif *lif);
40 static int ionic_start_queues(struct ionic_lif *lif);
41 static void ionic_stop_queues(struct ionic_lif *lif);
42 static void ionic_lif_queue_identify(struct ionic_lif *lif);
43
ionic_dim_work(struct work_struct * work)44 static void ionic_dim_work(struct work_struct *work)
45 {
46 struct dim *dim = container_of(work, struct dim, work);
47 struct ionic_intr_info *intr;
48 struct dim_cq_moder cur_moder;
49 struct ionic_qcq *qcq;
50 struct ionic_lif *lif;
51 u32 new_coal;
52
53 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
54 qcq = container_of(dim, struct ionic_qcq, dim);
55 lif = qcq->q.lif;
56 new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
57 new_coal = new_coal ? new_coal : 1;
58
59 intr = &qcq->intr;
60 if (intr->dim_coal_hw != new_coal) {
61 intr->dim_coal_hw = new_coal;
62
63 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
64 intr->index, intr->dim_coal_hw);
65 }
66
67 dim->state = DIM_START_MEASURE;
68 }
69
ionic_lif_deferred_work(struct work_struct * work)70 static void ionic_lif_deferred_work(struct work_struct *work)
71 {
72 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
73 struct ionic_deferred *def = &lif->deferred;
74 struct ionic_deferred_work *w = NULL;
75
76 do {
77 spin_lock_bh(&def->lock);
78 if (!list_empty(&def->list)) {
79 w = list_first_entry(&def->list,
80 struct ionic_deferred_work, list);
81 list_del(&w->list);
82 }
83 spin_unlock_bh(&def->lock);
84
85 if (!w)
86 break;
87
88 switch (w->type) {
89 case IONIC_DW_TYPE_RX_MODE:
90 ionic_lif_rx_mode(lif);
91 break;
92 case IONIC_DW_TYPE_LINK_STATUS:
93 ionic_link_status_check(lif);
94 break;
95 case IONIC_DW_TYPE_LIF_RESET:
96 if (w->fw_status) {
97 ionic_lif_handle_fw_up(lif);
98 } else {
99 ionic_lif_handle_fw_down(lif);
100
101 /* Fire off another watchdog to see
102 * if the FW is already back rather than
103 * waiting another whole cycle
104 */
105 mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
106 }
107 break;
108 default:
109 break;
110 }
111 kfree(w);
112 w = NULL;
113 } while (true);
114 }
115
ionic_lif_deferred_enqueue(struct ionic_deferred * def,struct ionic_deferred_work * work)116 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
117 struct ionic_deferred_work *work)
118 {
119 spin_lock_bh(&def->lock);
120 list_add_tail(&work->list, &def->list);
121 spin_unlock_bh(&def->lock);
122 schedule_work(&def->work);
123 }
124
/* Reconcile netdev carrier and queue state with the device-reported
 * link status.  Runs only when a check has been requested via the
 * LINK_CHECK_REQUESTED state bit (set by
 * ionic_link_status_check_request()); clears that bit when done.
 */
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		/* start the queues (under the queue lock) before
		 * announcing carrier; a failure other than -EBUSY
		 * marks the lif broken
		 */
		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(lif->netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		/* refresh port info and announce the link coming up */
		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		/* drop carrier first, then quiesce the queues */
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
179
/* Request a link status check: run it inline when the caller can
 * sleep, otherwise defer it to the workqueue.  Duplicate requests are
 * coalesced via the LINK_CHECK_REQUESTED state bit.
 */
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (can_sleep) {
		ionic_link_status_check(lif);
		return;
	}

	/* atomic context: hand the check off to the deferred worker */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	work->type = IONIC_DW_TYPE_LINK_STATUS;
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}
201
ionic_isr(int irq,void * data)202 static irqreturn_t ionic_isr(int irq, void *data)
203 {
204 struct napi_struct *napi = data;
205
206 napi_schedule_irqoff(napi);
207
208 return IRQ_HANDLED;
209 }
210
ionic_request_irq(struct ionic_lif * lif,struct ionic_qcq * qcq)211 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
212 {
213 struct ionic_intr_info *intr = &qcq->intr;
214 struct device *dev = lif->ionic->dev;
215 struct ionic_queue *q = &qcq->q;
216 const char *name;
217
218 if (lif->registered)
219 name = lif->netdev->name;
220 else
221 name = dev_name(dev);
222
223 snprintf(intr->name, sizeof(intr->name),
224 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
225
226 return devm_request_irq(dev, intr->vector, ionic_isr,
227 0, intr->name, &qcq->napi);
228 }
229
ionic_intr_alloc(struct ionic_lif * lif,struct ionic_intr_info * intr)230 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
231 {
232 struct ionic *ionic = lif->ionic;
233 int index;
234
235 index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
236 if (index == ionic->nintrs) {
237 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
238 __func__, index, ionic->nintrs);
239 return -ENOSPC;
240 }
241
242 set_bit(index, ionic->intrs);
243 ionic_intr_init(&ionic->idev, intr, index);
244
245 return 0;
246 }
247
ionic_intr_free(struct ionic * ionic,int index)248 static void ionic_intr_free(struct ionic *ionic, int index)
249 {
250 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
251 clear_bit(index, ionic->intrs);
252 }
253
/* Enable a queue: send a Q_CONTROL/ENABLE adminq command, then enable
 * NAPI, set irq affinity, and unmask the interrupt so events can flow.
 */
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	/* clear any stale interrupt state before enabling the queue */
	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->napi.poll)
		napi_enable(&qcq->napi);

	/* unmask only after NAPI is ready to take events */
	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}
298
/* Disable a queue: quiesce the host side (moderation work, interrupt,
 * NAPI) first, then optionally send a Q_CONTROL/DISABLE command.
 * NOTE(review): @send_to_hw == false appears intended for when the FW
 * is unreachable (reset/down paths) -- confirm against the callers.
 */
static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
	struct ionic_queue *q;
	struct ionic_lif *lif;
	int err = 0;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq)
		return -ENXIO;

	q = &qcq->q;
	lif = q->lif;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		/* cancel moderation work, mask and drain the irq, then
		 * stop NAPI -- in this order so no new events sneak in
		 */
		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	if (send_to_hw) {
		ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
		ctx.cmd.q_control.type = q->type;
		ctx.cmd.q_control.index = cpu_to_le32(q->index);
		dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
			ctx.cmd.q_control.index, ctx.cmd.q_control.type);

		err = ionic_adminq_post_wait(lif, &ctx);
	}

	return err;
}
342
ionic_lif_qcq_deinit(struct ionic_lif * lif,struct ionic_qcq * qcq)343 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
344 {
345 struct ionic_dev *idev = &lif->ionic->idev;
346
347 if (!qcq)
348 return;
349
350 if (!(qcq->flags & IONIC_QCQ_F_INITED))
351 return;
352
353 if (qcq->flags & IONIC_QCQ_F_INTR) {
354 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
355 IONIC_INTR_MASK_SET);
356 netif_napi_del(&qcq->napi);
357 }
358
359 qcq->flags &= ~IONIC_QCQ_F_INITED;
360 }
361
ionic_qcq_intr_free(struct ionic_lif * lif,struct ionic_qcq * qcq)362 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
363 {
364 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
365 return;
366
367 irq_set_affinity_hint(qcq->intr.vector, NULL);
368 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
369 qcq->intr.vector = 0;
370 ionic_intr_free(lif->ionic, qcq->intr.index);
371 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
372 }
373
/* Free everything ionic_qcq_alloc() set up for a qcq: debugfs entry,
 * the q/cq/sg DMA rings, the interrupt, and the q/cq info arrays.
 * Pointers are NULLed so a second call is harmless.  The qcq struct
 * itself is devm-allocated and freed by the caller.
 */
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	/* release the irq before the info arrays it references */
	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}
412
/* Free all of the lif's queue structures: the notifyq, the adminq
 * (detached under the adminq lock), and the tx/rx qcq-pointer and
 * stats arrays.
 */
static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		/* clear lif->adminqcq under the lock so anyone posting
		 * adminq commands sees NULL before the memory goes away
		 */
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}
450
/* Make n_qcq share src_qcq's interrupt vector and index, e.g. so the
 * notifyq can ride on the adminq's interrupt.
 */
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}
457
/* Set up a qcq's own interrupt: claim an intr slot, map it to an irq
 * vector, leave it masked, request the irq, and pick a preferred CPU
 * for affinity.  Returns 0 immediately for qcqs without
 * IONIC_QCQ_F_INTR.  On failure, everything claimed here is released.
 */
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	/* keep the interrupt masked until the queue is enabled */
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}
504
ionic_qcq_alloc(struct ionic_lif * lif,unsigned int type,unsigned int index,const char * name,unsigned int flags,unsigned int num_descs,unsigned int desc_size,unsigned int cq_desc_size,unsigned int sg_desc_size,unsigned int pid,struct ionic_qcq ** qcq)505 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
506 unsigned int index,
507 const char *name, unsigned int flags,
508 unsigned int num_descs, unsigned int desc_size,
509 unsigned int cq_desc_size,
510 unsigned int sg_desc_size,
511 unsigned int pid, struct ionic_qcq **qcq)
512 {
513 struct ionic_dev *idev = &lif->ionic->idev;
514 struct device *dev = lif->ionic->dev;
515 void *q_base, *cq_base, *sg_base;
516 dma_addr_t cq_base_pa = 0;
517 dma_addr_t sg_base_pa = 0;
518 dma_addr_t q_base_pa = 0;
519 struct ionic_qcq *new;
520 int err;
521
522 *qcq = NULL;
523
524 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
525 if (!new) {
526 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
527 err = -ENOMEM;
528 goto err_out;
529 }
530
531 new->q.dev = dev;
532 new->flags = flags;
533
534 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
535 GFP_KERNEL);
536 if (!new->q.info) {
537 netdev_err(lif->netdev, "Cannot allocate queue info\n");
538 err = -ENOMEM;
539 goto err_out_free_qcq;
540 }
541
542 new->q.type = type;
543 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
544
545 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
546 desc_size, sg_desc_size, pid);
547 if (err) {
548 netdev_err(lif->netdev, "Cannot initialize queue\n");
549 goto err_out_free_q_info;
550 }
551
552 err = ionic_alloc_qcq_interrupt(lif, new);
553 if (err)
554 goto err_out;
555
556 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
557 GFP_KERNEL);
558 if (!new->cq.info) {
559 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
560 err = -ENOMEM;
561 goto err_out_free_irq;
562 }
563
564 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
565 if (err) {
566 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
567 goto err_out_free_cq_info;
568 }
569
570 if (flags & IONIC_QCQ_F_NOTIFYQ) {
571 int q_size, cq_size;
572
573 /* q & cq need to be contiguous in case of notifyq */
574 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
575 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
576
577 new->q_size = PAGE_SIZE + q_size + cq_size;
578 new->q_base = dma_alloc_coherent(dev, new->q_size,
579 &new->q_base_pa, GFP_KERNEL);
580 if (!new->q_base) {
581 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
582 err = -ENOMEM;
583 goto err_out_free_cq_info;
584 }
585 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
586 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
587 ionic_q_map(&new->q, q_base, q_base_pa);
588
589 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
590 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
591 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
592 ionic_cq_bind(&new->cq, &new->q);
593 } else {
594 new->q_size = PAGE_SIZE + (num_descs * desc_size);
595 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
596 GFP_KERNEL);
597 if (!new->q_base) {
598 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
599 err = -ENOMEM;
600 goto err_out_free_cq_info;
601 }
602 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
603 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
604 ionic_q_map(&new->q, q_base, q_base_pa);
605
606 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
607 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
608 GFP_KERNEL);
609 if (!new->cq_base) {
610 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
611 err = -ENOMEM;
612 goto err_out_free_q;
613 }
614 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
615 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
616 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
617 ionic_cq_bind(&new->cq, &new->q);
618 }
619
620 if (flags & IONIC_QCQ_F_SG) {
621 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
622 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
623 GFP_KERNEL);
624 if (!new->sg_base) {
625 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
626 err = -ENOMEM;
627 goto err_out_free_cq;
628 }
629 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
630 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
631 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
632 }
633
634 INIT_WORK(&new->dim.work, ionic_dim_work);
635 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
636
637 *qcq = new;
638
639 return 0;
640
641 err_out_free_cq:
642 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
643 err_out_free_q:
644 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
645 err_out_free_cq_info:
646 devm_kfree(dev, new->cq.info);
647 err_out_free_irq:
648 if (flags & IONIC_QCQ_F_INTR) {
649 devm_free_irq(dev, new->intr.vector, &new->napi);
650 ionic_intr_free(lif->ionic, new->intr.index);
651 }
652 err_out_free_q_info:
653 devm_kfree(dev, new->q.info);
654 err_out_free_qcq:
655 devm_kfree(dev, new);
656 err_out:
657 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
658 return err;
659 }
660
ionic_qcqs_alloc(struct ionic_lif * lif)661 static int ionic_qcqs_alloc(struct ionic_lif *lif)
662 {
663 struct device *dev = lif->ionic->dev;
664 unsigned int flags;
665 int err;
666
667 flags = IONIC_QCQ_F_INTR;
668 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
669 IONIC_ADMINQ_LENGTH,
670 sizeof(struct ionic_admin_cmd),
671 sizeof(struct ionic_admin_comp),
672 0, lif->kern_pid, &lif->adminqcq);
673 if (err)
674 return err;
675 ionic_debugfs_add_qcq(lif, lif->adminqcq);
676
677 if (lif->ionic->nnqs_per_lif) {
678 flags = IONIC_QCQ_F_NOTIFYQ;
679 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
680 flags, IONIC_NOTIFYQ_LENGTH,
681 sizeof(struct ionic_notifyq_cmd),
682 sizeof(union ionic_notifyq_comp),
683 0, lif->kern_pid, &lif->notifyqcq);
684 if (err)
685 goto err_out;
686 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
687
688 /* Let the notifyq ride on the adminq interrupt */
689 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
690 }
691
692 err = -ENOMEM;
693 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
694 sizeof(*lif->txqcqs), GFP_KERNEL);
695 if (!lif->txqcqs)
696 goto err_out;
697 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
698 sizeof(*lif->rxqcqs), GFP_KERNEL);
699 if (!lif->rxqcqs)
700 goto err_out;
701
702 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
703 sizeof(*lif->txqstats), GFP_KERNEL);
704 if (!lif->txqstats)
705 goto err_out;
706 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
707 sizeof(*lif->rxqstats), GFP_KERNEL);
708 if (!lif->rxqstats)
709 goto err_out;
710
711 return 0;
712
713 err_out:
714 ionic_qcqs_free(lif);
715 return err;
716 }
717
/* Reset a qcq's producer/consumer state and zero its rings so it can
 * be handed to the device in a clean state.
 */
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	/* sg_size is 0 for queues without SG, making this a no-op */
	memset(qcq->sg_base, 0, qcq->sg_size);
}
728
/* Register a Tx queue with the device: post a Q_INIT adminq command
 * carrying the ring addresses and sizes, then record the hardware
 * queue ids from the completion.  In split-interrupt mode the Tx queue
 * gets its own NAPI context here.
 */
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	unsigned int intr_index;
	int err;

	intr_index = qcq->intr.index;

	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	/* reset indexes and zero the rings before the device sees them */
	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* remember the hardware's ids for doorbells and debug */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	/* with split tx/rx interrupts, tx polls with its own NAPI */
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
788
/* Register an Rx queue with the device via a Q_INIT adminq command and
 * record the hardware queue ids from the completion.  The Rx queue
 * always owns a NAPI context: rx-only polling in split-interrupt mode,
 * combined tx+rx polling otherwise.
 */
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	/* reset indexes and zero the rings before the device sees them */
	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* remember the hardware's ids for doorbells and debug */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
847
/* Create the dedicated Tx queue for hardware-timestamped packets.
 * It sits one slot past the normal tx queues (index ntxqs_per_lif),
 * shares the adminq interrupt, and is initialized and enabled right
 * away if the interface is already running.  Idempotent: returns 0 if
 * the queue already exists.
 */
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);	/* 2X_CQ_DESC */

	/* pick the SG descriptor size matching the negotiated txq version */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	/* no interrupt of its own; ride on the adminq's */
	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}
911
/* Create the dedicated Rx queue for hardware-timestamped packets.
 * It sits one slot past the normal rx queues (index nrxqs_per_lif),
 * shares the adminq interrupt, and is initialized, filled, and enabled
 * right away if the interface is already running.  Idempotent: returns
 * 0 if the queue already exists.
 */
int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);	/* 2X_CQ_DESC */
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	/* no interrupt of its own; ride on the adminq's */
	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			/* post rx buffers before enabling the queue */
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}
971
/* Turn hardware rx timestamping on or off for all of the normal rx
 * queues.  When the interface is down only the desired feature bits
 * are recorded; otherwise the queues are reconfigured immediately.
 */
int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	qparam.rxq_features = rx_all ?
			      IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP : 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}
991
/* Program the lif's Tx timestamping mode into the device with a
 * LIF_SETATTR adminq command.
 */
int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}
1006
/* Remove the rx steering filter for timestamped packets, if one
 * exists: drop it from the driver's filter list under the lock first,
 * then tell the device to delete it.
 */
static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;		/* no steering filter installed */
	}

	/* capture the id before freeing the driver-side entry */
	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	/* -EEXIST from the device is tolerated here */
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}
1041
/* Install an rx steering filter that directs packets matching
 * @pkt_class to the hwstamp rx queue, then save it in the driver's
 * filter list as SYNCED.  Requires lif->hwstamp_rxq to exist.
 * A device answer of -EEXIST is treated as success.
 */
static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	/* steer to the dedicated hwstamp rx queue */
	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
1077
/* Replace the current hwstamp Rx steering filter with one for the
 * given packet class; a pkt_class of 0 just removes the old filter.
 */
int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	/* always clear any previous steering filter first */
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (pkt_class)
		return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);

	return 0;
}
1087
ionic_notifyq_service(struct ionic_cq * cq,struct ionic_cq_info * cq_info)1088 static bool ionic_notifyq_service(struct ionic_cq *cq,
1089 struct ionic_cq_info *cq_info)
1090 {
1091 union ionic_notifyq_comp *comp = cq_info->cq_desc;
1092 struct ionic_deferred_work *work;
1093 struct net_device *netdev;
1094 struct ionic_queue *q;
1095 struct ionic_lif *lif;
1096 u64 eid;
1097
1098 q = cq->bound_q;
1099 lif = q->info[0].cb_arg;
1100 netdev = lif->netdev;
1101 eid = le64_to_cpu(comp->event.eid);
1102
1103 /* Have we run out of new completions to process? */
1104 if ((s64)(eid - lif->last_eid) <= 0)
1105 return false;
1106
1107 lif->last_eid = eid;
1108
1109 dev_dbg(lif->ionic->dev, "notifyq event:\n");
1110 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
1111 comp, sizeof(*comp), true);
1112
1113 switch (le16_to_cpu(comp->event.ecode)) {
1114 case IONIC_EVENT_LINK_CHANGE:
1115 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1116 break;
1117 case IONIC_EVENT_RESET:
1118 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1119 if (!work) {
1120 netdev_err(lif->netdev, "Reset event dropped\n");
1121 } else {
1122 work->type = IONIC_DW_TYPE_LIF_RESET;
1123 ionic_lif_deferred_enqueue(&lif->deferred, work);
1124 }
1125 break;
1126 default:
1127 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
1128 comp->event.ecode, eid);
1129 break;
1130 }
1131
1132 return true;
1133 }
1134
ionic_adminq_service(struct ionic_cq * cq,struct ionic_cq_info * cq_info)1135 static bool ionic_adminq_service(struct ionic_cq *cq,
1136 struct ionic_cq_info *cq_info)
1137 {
1138 struct ionic_admin_comp *comp = cq_info->cq_desc;
1139
1140 if (!color_match(comp->color, cq->done_color))
1141 return false;
1142
1143 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
1144
1145 return true;
1146 }
1147
/* NAPI poll handler for the admin interrupt: services the NotifyQ,
 * the AdminQ, and (when PTP is in use) the hwstamp Tx/Rx queues that
 * share this interrupt.  Returns the largest per-queue work count so
 * napi_complete_done() sees a budget-relative number, while the
 * interrupt credits are returned as the sum of all work done.
 */
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	/* adminq_lock serializes against posting from process context */
	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	/* report the busiest queue; only unmask once napi is truly done */
	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	return work_done;
}
1194
ionic_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * ns)1195 void ionic_get_stats64(struct net_device *netdev,
1196 struct rtnl_link_stats64 *ns)
1197 {
1198 struct ionic_lif *lif = netdev_priv(netdev);
1199 struct ionic_lif_stats *ls;
1200
1201 memset(ns, 0, sizeof(*ns));
1202 ls = &lif->info->stats;
1203
1204 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
1205 le64_to_cpu(ls->rx_mcast_packets) +
1206 le64_to_cpu(ls->rx_bcast_packets);
1207
1208 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
1209 le64_to_cpu(ls->tx_mcast_packets) +
1210 le64_to_cpu(ls->tx_bcast_packets);
1211
1212 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
1213 le64_to_cpu(ls->rx_mcast_bytes) +
1214 le64_to_cpu(ls->rx_bcast_bytes);
1215
1216 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
1217 le64_to_cpu(ls->tx_mcast_bytes) +
1218 le64_to_cpu(ls->tx_bcast_bytes);
1219
1220 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
1221 le64_to_cpu(ls->rx_mcast_drop_packets) +
1222 le64_to_cpu(ls->rx_bcast_drop_packets);
1223
1224 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
1225 le64_to_cpu(ls->tx_mcast_drop_packets) +
1226 le64_to_cpu(ls->tx_bcast_drop_packets);
1227
1228 ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
1229
1230 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
1231
1232 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
1233 le64_to_cpu(ls->rx_queue_disabled) +
1234 le64_to_cpu(ls->rx_desc_fetch_error) +
1235 le64_to_cpu(ls->rx_desc_data_error);
1236
1237 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
1238 le64_to_cpu(ls->tx_queue_disabled) +
1239 le64_to_cpu(ls->tx_desc_fetch_error) +
1240 le64_to_cpu(ls->tx_desc_data_error);
1241
1242 ns->rx_errors = ns->rx_over_errors +
1243 ns->rx_missed_errors;
1244
1245 ns->tx_errors = ns->tx_aborted_errors;
1246 }
1247
/* Add a MAC filter, tracking it in the driver's rx_filters list and
 * pushing it to the firmware.  The filter state machine here guards
 * against racing add/del requests: the entry is marked SYNCED before
 * the (lockless) adminq post, and rolled back to NEW if the post fails.
 * Returns 0 on success, on a benign duplicate (-EEXIST from fw), or
 * when out of fw filter space (retried on a later sync); otherwise a
 * negative error.
 */
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	bool mc = is_multicast_ether_addr(addr);
	struct ionic_rx_filter *f;
	int err = 0;

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 */
	if ((lif->nucast + lif->nmcast) >= nfilters)
		err = -ENOSPC;
	else
		err = ionic_adminq_post_wait(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);
	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_by_addr(lif, addr);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
			f->state = IONIC_FILTER_STATE_NEW;
			set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
		}

		spin_unlock_bh(&lif->rx_filters.lock);

		/* -ENOSPC is not fatal: the sync task retries later */
		if (err == -ENOSPC)
			return 0;
		else
			return err;
	}

	if (mc)
		lif->nmcast++;
	else
		lif->nucast++;

	f = ionic_rx_filter_by_addr(lif, addr);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
1335
/* Remove a MAC filter: drop the local entry (and adjust the uc/mc
 * counts) under the rx_filters lock, then tell the firmware unless
 * the filter was still NEW, i.e. never pushed to the fw at all.
 * Returns -ENOENT when no such filter is tracked.
 */
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	/* remember state and fw id before the entry is freed */
	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);

	if (is_multicast_ether_addr(addr) && lif->nmcast)
		lif->nmcast--;
	else if (!is_multicast_ether_addr(addr) && lif->nucast)
		lif->nucast--;

	spin_unlock_bh(&lif->rx_filters.lock);

	/* a NEW filter never reached the fw, nothing to delete there */
	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err && err != -EEXIST)
			return err;
	}

	return 0;
}
1378
ionic_addr_add(struct net_device * netdev,const u8 * addr)1379 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1380 {
1381 return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
1382 }
1383
ionic_addr_del(struct net_device * netdev,const u8 * addr)1384 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1385 {
1386 /* Don't delete our own address from the uc list */
1387 if (ether_addr_equal(addr, netdev->dev_addr))
1388 return 0;
1389
1390 return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
1391 }
1392
/* Compute and apply the Rx mode for this lif: sync the mac filter
 * list to the fw, derive the mode bits from the netdev flags, and
 * force PROMISC/ALLMULTI while the filter table is overflowed.
 * Runs under config_lock; typically invoked from the deferred work
 * task queued by ionic_ndo_set_rx_mode().
 */
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the mac filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 * if so, we track that we overflowed and enable NIC PROMISC
	 * else if the overflow is set and not needed
	 * we remove our overflow flag and check the netdev flags
	 * to see if we can disable NIC PROMISC
	 */
	/* NOTE(review): uc+mc total is compared against max_ucast_filters
	 * only - presumably intentional as a combined budget; verify against
	 * device filter accounting
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if ((lif->nucast + lif->nmcast) >= nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->uc_overflow = true;
		lif->mc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		lif->mc_overflow = false;
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	/* build a human-readable summary of the transition for debug */
	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	/* only post to the fw when the mode actually changes */
	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}
1475
ionic_ndo_set_rx_mode(struct net_device * netdev)1476 static void ionic_ndo_set_rx_mode(struct net_device *netdev)
1477 {
1478 struct ionic_lif *lif = netdev_priv(netdev);
1479 struct ionic_deferred_work *work;
1480
1481 /* Sync the kernel filter list with the driver filter list */
1482 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1483 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1484
1485 /* Shove off the rest of the rxmode work to the work task
1486 * which will include syncing the filters to the firmware.
1487 */
1488 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1489 if (!work) {
1490 netdev_err(lif->netdev, "rxmode change dropped\n");
1491 return;
1492 }
1493 work->type = IONIC_DW_TYPE_RX_MODE;
1494 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1495 ionic_lif_deferred_enqueue(&lif->deferred, work);
1496 }
1497
ionic_netdev_features_to_nic(netdev_features_t features)1498 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1499 {
1500 u64 wanted = 0;
1501
1502 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1503 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1504 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1505 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1506 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1507 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1508 if (features & NETIF_F_RXHASH)
1509 wanted |= IONIC_ETH_HW_RX_HASH;
1510 if (features & NETIF_F_RXCSUM)
1511 wanted |= IONIC_ETH_HW_RX_CSUM;
1512 if (features & NETIF_F_SG)
1513 wanted |= IONIC_ETH_HW_TX_SG;
1514 if (features & NETIF_F_HW_CSUM)
1515 wanted |= IONIC_ETH_HW_TX_CSUM;
1516 if (features & NETIF_F_TSO)
1517 wanted |= IONIC_ETH_HW_TSO;
1518 if (features & NETIF_F_TSO6)
1519 wanted |= IONIC_ETH_HW_TSO_IPV6;
1520 if (features & NETIF_F_TSO_ECN)
1521 wanted |= IONIC_ETH_HW_TSO_ECN;
1522 if (features & NETIF_F_GSO_GRE)
1523 wanted |= IONIC_ETH_HW_TSO_GRE;
1524 if (features & NETIF_F_GSO_GRE_CSUM)
1525 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1526 if (features & NETIF_F_GSO_IPXIP4)
1527 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1528 if (features & NETIF_F_GSO_IPXIP6)
1529 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1530 if (features & NETIF_F_GSO_UDP_TUNNEL)
1531 wanted |= IONIC_ETH_HW_TSO_UDP;
1532 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1533 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1534
1535 return cpu_to_le64(wanted);
1536 }
1537
/* Ask the fw to enable the requested feature set.  The fw replies
 * with what it actually supports; lif->hw_features is updated to
 * the intersection of requested and supported bits.  Returns the
 * adminq result; the fw trimming features is not an error.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	/* hw timestamping is always requested when the PHC is set up */
	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	/* intersection of what we asked for and what the fw granted;
	 * the AND is done in LE form before converting to cpu order
	 */
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* RX_HASH toggling requires a fresh RSS config push */
	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}
1613
/* Negotiate the default feature set with the fw, then advertise to
 * the stack only what the fw actually granted.  Encap features are
 * set on hw_enc_features and folded into hw_features/features, so
 * the same offloads apply to tunneled traffic.
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	/* RSS only makes sense with more than one queue */
	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	/* csum/TSO features also apply to encapsulated traffic */
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}
1684
/* ndo_set_features: forward the stack's feature change to the fw */
static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	return ionic_set_nic_features(lif, features);
}
1698
/* Program the station mac address into the fw via LIF_SETATTR */
static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
	};

	ctx.cmd.lif_setattr.opcode = IONIC_CMD_LIF_SETATTR;
	ctx.cmd.lif_setattr.index = cpu_to_le16(lif->index);
	ctx.cmd.lif_setattr.attr = IONIC_LIF_ATTR_MAC;
	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);

	return ionic_adminq_post_wait(lif, &ctx);
}
1713
/* Read back the station mac address from the fw via LIF_GETATTR;
 * mac_addr is only written on success.
 */
static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
	};
	int err;

	ctx.cmd.lif_getattr.opcode = IONIC_CMD_LIF_GETATTR;
	ctx.cmd.lif_getattr.index = cpu_to_le16(lif->index);
	ctx.cmd.lif_getattr.attr = IONIC_LIF_ATTR_MAC;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);

	return 0;
}
1733
/* Program the mac into the fw and verify it took.
 *
 * To deal with older firmware that silently ignores the set attr mac
 * (doesn't actually change the mac and doesn't return an error), we
 * read the mac back and compare: returns 0 when the set stuck, 1 when
 * the fw kept its old mac, or a negative error from either command.
 */
static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 readback[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (!err)
		err = ionic_get_attr_mac(lif, readback);
	if (err)
		return err;

	return ether_addr_equal(readback, mac) ? 0 : 1;
}
1756
/* ndo_set_mac_address: program the new mac into the fw, then swap
 * the rx filters - delete the filter for the old address and add one
 * for the new - and finally commit the change to the netdev.
 */
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	/* program the fw first; a positive return means old fw silently
	 * ignored the set (see ionic_program_mac) - continue anyway
	 */
	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
			   __func__);

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	/* drop the rx filter for the previous address, if any */
	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}
1791
/* Quiesce the data path ahead of a queue reconfiguration: detach the
 * netdev so the stack stops submitting, stop the queues, then tear
 * down the queue state.  Paired with ionic_start_queues_reconfig();
 * callers hold lif->queue_lock.
 */
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}
1799
/* Bring the data path back up after a queue reconfiguration.  The
 * netdev is re-attached even if txrx_init failed, so the user can
 * still drive an admin DOWN/UP to recover.
 */
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}
1818
/* ndo_change_mtu: tell the fw the new MTU first; if the interface is
 * running, the queues must be torn down and rebuilt (under queue_lock)
 * so the Rx buffers are re-sized for the new MTU.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}
1851
ionic_tx_timeout_work(struct work_struct * ws)1852 static void ionic_tx_timeout_work(struct work_struct *ws)
1853 {
1854 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1855
1856 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1857 return;
1858
1859 /* if we were stopped before this scheduled job was launched,
1860 * don't bother the queues as they are already stopped.
1861 */
1862 if (!netif_running(lif->netdev))
1863 return;
1864
1865 mutex_lock(&lif->queue_lock);
1866 ionic_stop_queues_reconfig(lif);
1867 ionic_start_queues_reconfig(lif);
1868 mutex_unlock(&lif->queue_lock);
1869 }
1870
ionic_tx_timeout(struct net_device * netdev,unsigned int txqueue)1871 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1872 {
1873 struct ionic_lif *lif = netdev_priv(netdev);
1874
1875 netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1876 schedule_work(&lif->tx_timeout_work);
1877 }
1878
/* ndo_vlan_rx_add_vid: add a VLAN rx filter in the fw, then record
 * it in the driver's filter list for later sync/cleanup.
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
		},
	};
	int err;

	ctx.cmd.rx_filter_add.vlan.vlan = cpu_to_le16(vid);

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
				   IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
1906
/* ndo_vlan_rx_kill_vid: drop the local VLAN filter entry under the
 * rx_filters lock, then tell the fw to delete its copy.  Returns
 * -ENOENT when no such vid is tracked.
 */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	/* capture the fw id into the command before freeing the entry */
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}
1937
/* Push RSS configuration to the fw: hash types, hash key, and the
 * indirection table (which the fw reads by DMA from rss_ind_tbl_pa).
 * key and/or indir may be NULL to keep the current values.  Note the
 * hash types are only updated when the RX_HASH feature is enabled;
 * otherwise types of 0 are sent, effectively disabling RSS.
 */
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	/* copy the new table into the DMA-visible buffer the fw reads */
	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
1970
ionic_lif_rss_init(struct ionic_lif * lif)1971 static int ionic_lif_rss_init(struct ionic_lif *lif)
1972 {
1973 unsigned int tbl_sz;
1974 unsigned int i;
1975
1976 lif->rss_types = IONIC_RSS_TYPE_IPV4 |
1977 IONIC_RSS_TYPE_IPV4_TCP |
1978 IONIC_RSS_TYPE_IPV4_UDP |
1979 IONIC_RSS_TYPE_IPV6 |
1980 IONIC_RSS_TYPE_IPV6_TCP |
1981 IONIC_RSS_TYPE_IPV6_UDP;
1982
1983 /* Fill indirection table with 'default' values */
1984 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1985 for (i = 0; i < tbl_sz; i++)
1986 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1987
1988 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1989 }
1990
ionic_lif_rss_deinit(struct ionic_lif * lif)1991 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1992 {
1993 int tbl_sz;
1994
1995 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1996 memset(lif->rss_ind_tbl, 0, tbl_sz);
1997 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1998
1999 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
2000 }
2001
/* Ask the firmware to quiesce this LIF before the queues are torn
 * down.  A failure is only logged: teardown proceeds regardless.
 */
static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
}
2019
/* Disable all tx and rx queues (including the hwstamp queues, if
 * present), then quiesce the LIF.  The (err != -ETIMEDOUT) argument
 * tells ionic_qcq_disable() whether it's worth waiting on the FW:
 * after a disable times out we assume the FW is unresponsive and skip
 * the wait for the next queue.  Note the hint only carries from the
 * immediately preceding call, since err is overwritten each time.
 */
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->hwstamp_txq)
		err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->hwstamp_rxq)
		err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));

	ionic_lif_quiesce(lif);
}
2043
/* Deinit every initialized queue and discard any buffers still held
 * on the rings.  The loops stop at the first NULL qcq pointer since
 * the arrays are filled in order.  Callers disable the queues first
 * (see ionic_stop()).
 */
static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			/* reap completed work, then drop unfinished skbs */
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	/* forget the rx filter mode so it gets reprogrammed on re-init */
	lif->rx_mode = 0;

	if (lif->hwstamp_txq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
		ionic_tx_flush(&lif->hwstamp_txq->cq);
		ionic_tx_empty(&lif->hwstamp_txq->q);
	}

	if (lif->hwstamp_rxq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
		ionic_rx_empty(&lif->hwstamp_rxq->q);
	}
}
2075
ionic_txrx_free(struct ionic_lif * lif)2076 static void ionic_txrx_free(struct ionic_lif *lif)
2077 {
2078 unsigned int i;
2079
2080 if (lif->txqcqs) {
2081 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
2082 ionic_qcq_free(lif, lif->txqcqs[i]);
2083 devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
2084 lif->txqcqs[i] = NULL;
2085 }
2086 }
2087
2088 if (lif->rxqcqs) {
2089 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
2090 ionic_qcq_free(lif, lif->rxqcqs[i]);
2091 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
2092 lif->rxqcqs[i] = NULL;
2093 }
2094 }
2095
2096 if (lif->hwstamp_txq) {
2097 ionic_qcq_free(lif, lif->hwstamp_txq);
2098 devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
2099 lif->hwstamp_txq = NULL;
2100 }
2101
2102 if (lif->hwstamp_rxq) {
2103 ionic_qcq_free(lif, lif->hwstamp_rxq);
2104 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
2105 lif->hwstamp_rxq = NULL;
2106 }
2107 }
2108
/* Allocate the tx and rx qcqs with their descriptor rings, set up
 * interrupt coalescing, and link rx/tx interrupts together when not
 * in split-interrupt mode.  On any failure, everything allocated so
 * far is released via ionic_txrx_free().
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	unsigned int flags, i;
	int err = 0;

	num_desc = lif->ntxq_descs;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = sizeof(struct ionic_txq_comp);

	/* use the v1 SG descriptor layout if the device advertises it */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	/* tx queues get their own interrupt only in split-interrupt mode */
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	/* rx queues always own an interrupt */
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;

	num_desc = lif->nrxq_descs;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	/* some rx features need double-sized completion descriptors */
	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
		comp_sz *= 2;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		lif->rxqcqs[i]->q.features = lif->rxq_features;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		/* shared-interrupt mode: tx rides on the rx interrupt */
		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}
2186
/* Push the pre-allocated tx/rx queue configs to the device, then set
 * up RSS (if enabled) and the rx filter mode.  On failure, the queue
 * pairs already initialized are torn back down; for the failing pair
 * only the tx side was brought up, and it is deinited before the
 * unwind loop runs.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_lif_rx_mode(lif);

	return 0;

err_out:
	/* i is the failing index; unwind the fully-initialized pairs */
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}
2219
/* Fill and enable each rx queue, then enable its partner tx queue,
 * followed by the optional hwstamp queues.  On failure, everything
 * enabled so far is disabled again in reverse; derr carries the
 * -ETIMEDOUT hint between disables so we stop waiting on an
 * unresponsive FW.
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			/* tx failed, so back out this pair's rx first */
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	if (lif->hwstamp_rxq) {
		ionic_rx_fill(&lif->hwstamp_rxq->q);
		err = ionic_qcq_enable(lif->hwstamp_rxq);
		if (err)
			goto err_out_hwstamp_rx;
	}

	if (lif->hwstamp_txq) {
		err = ionic_qcq_enable(lif->hwstamp_txq);
		if (err)
			goto err_out_hwstamp_tx;
	}

	return 0;

err_out_hwstamp_tx:
	if (lif->hwstamp_rxq)
		derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
err_out_hwstamp_rx:
	i = lif->nxqs;
err_out:
	/* disable the fully-enabled pairs below the failing index */
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}
2272
ionic_start_queues(struct ionic_lif * lif)2273 static int ionic_start_queues(struct ionic_lif *lif)
2274 {
2275 int err;
2276
2277 if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
2278 return -EIO;
2279
2280 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2281 return -EBUSY;
2282
2283 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
2284 return 0;
2285
2286 err = ionic_txrx_enable(lif);
2287 if (err) {
2288 clear_bit(IONIC_LIF_F_UP, lif->state);
2289 return err;
2290 }
2291 netif_tx_wake_all_queues(lif->netdev);
2292
2293 return 0;
2294 }
2295
/* ndo_open: allocate and init the queues, set the stack's real queue
 * counts, and start the data path if link is already up.  Resources
 * are unwound in reverse order via the goto chain on failure.
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	/* If recovering from a broken state, clear the bit and we'll try again */
	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		netdev_info(netdev, "clearing broken state\n");

	mutex_lock(&lif->queue_lock);

	err = ionic_txrx_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	/* If hardware timestamping is enabled, but the queues were freed by
	 * ionic_stop, those need to be reallocated and initialized, too.
	 */
	ionic_lif_hwstamp_recreate_queues(lif);

	mutex_unlock(&lif->queue_lock);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
	return err;
}
2347
/* Take the data path down if it is up: quiet the stack's tx side
 * first so no new work arrives, then disable the device queues.
 */
static void ionic_stop_queues(struct ionic_lif *lif)
{
	/* only proceed if we're the ones who cleared the UP bit */
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}
2356
/* ndo_stop: tear down the data path under the queue lock.  During an
 * FW reset the FW-down path owns queue teardown, so there is nothing
 * to do here.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);
	mutex_unlock(&lif->queue_lock);

	return 0;
}
2372
ionic_eth_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)2373 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2374 {
2375 struct ionic_lif *lif = netdev_priv(netdev);
2376
2377 switch (cmd) {
2378 case SIOCSHWTSTAMP:
2379 return ionic_lif_hwstamp_set(lif, ifr);
2380 case SIOCGHWTSTAMP:
2381 return ionic_lif_hwstamp_get(lif, ifr);
2382 default:
2383 return -EOPNOTSUPP;
2384 }
2385 }
2386
ionic_get_vf_config(struct net_device * netdev,int vf,struct ifla_vf_info * ivf)2387 static int ionic_get_vf_config(struct net_device *netdev,
2388 int vf, struct ifla_vf_info *ivf)
2389 {
2390 struct ionic_lif *lif = netdev_priv(netdev);
2391 struct ionic *ionic = lif->ionic;
2392 int ret = 0;
2393
2394 if (!netif_device_present(netdev))
2395 return -EBUSY;
2396
2397 down_read(&ionic->vf_op_lock);
2398
2399 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2400 ret = -EINVAL;
2401 } else {
2402 ivf->vf = vf;
2403 ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
2404 ivf->qos = 0;
2405 ivf->spoofchk = ionic->vfs[vf].spoofchk;
2406 ivf->linkstate = ionic->vfs[vf].linkstate;
2407 ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
2408 ivf->trusted = ionic->vfs[vf].trusted;
2409 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
2410 }
2411
2412 up_read(&ionic->vf_op_lock);
2413 return ret;
2414 }
2415
ionic_get_vf_stats(struct net_device * netdev,int vf,struct ifla_vf_stats * vf_stats)2416 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2417 struct ifla_vf_stats *vf_stats)
2418 {
2419 struct ionic_lif *lif = netdev_priv(netdev);
2420 struct ionic *ionic = lif->ionic;
2421 struct ionic_lif_stats *vs;
2422 int ret = 0;
2423
2424 if (!netif_device_present(netdev))
2425 return -EBUSY;
2426
2427 down_read(&ionic->vf_op_lock);
2428
2429 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2430 ret = -EINVAL;
2431 } else {
2432 memset(vf_stats, 0, sizeof(*vf_stats));
2433 vs = &ionic->vfs[vf].stats;
2434
2435 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2436 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2437 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
2438 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
2439 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
2440 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
2441 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2442 le64_to_cpu(vs->rx_mcast_drop_packets) +
2443 le64_to_cpu(vs->rx_bcast_drop_packets);
2444 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2445 le64_to_cpu(vs->tx_mcast_drop_packets) +
2446 le64_to_cpu(vs->tx_bcast_drop_packets);
2447 }
2448
2449 up_read(&ionic->vf_op_lock);
2450 return ret;
2451 }
2452
/* Program a VF's MAC address; all-zeros clears it */
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* accept only a clearing address or a valid unicast one */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (!ionic->vfs || vf >= pci_num_vf(ionic->pdev)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
	if (!ret)
		ether_addr_copy(ionic->vfs[vf].macaddr, mac);

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
2478
/* Program a VF's port VLAN; only 802.1Q without qos is supported */
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* qos is not supported */
	if (qos)
		return -EINVAL;

	/* VLAN IDs are 12 bits */
	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (!ionic->vfs || vf >= pci_num_vf(ionic->pdev)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ionic_set_vf_config(ionic, vf,
				  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
	if (!ret)
		ionic->vfs[vf].vlanid = cpu_to_le16(vlan);

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
2513
ionic_set_vf_rate(struct net_device * netdev,int vf,int tx_min,int tx_max)2514 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2515 int tx_min, int tx_max)
2516 {
2517 struct ionic_lif *lif = netdev_priv(netdev);
2518 struct ionic *ionic = lif->ionic;
2519 int ret;
2520
2521 /* setting the min just seems silly */
2522 if (tx_min)
2523 return -EINVAL;
2524
2525 if (!netif_device_present(netdev))
2526 return -EBUSY;
2527
2528 down_write(&ionic->vf_op_lock);
2529
2530 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2531 ret = -EINVAL;
2532 } else {
2533 ret = ionic_set_vf_config(ionic, vf,
2534 IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
2535 if (!ret)
2536 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2537 }
2538
2539 up_write(&ionic->vf_op_lock);
2540 return ret;
2541 }
2542
/* Enable/disable MAC anti-spoof checking for a VF */
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* the config interface takes a u8 flag */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (!ionic->vfs || vf >= pci_num_vf(ionic->pdev)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_SPOOFCHK, &data);
	if (!ret)
		ionic->vfs[vf].spoofchk = data;

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
2567
/* Set or clear the trusted flag for a VF */
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* the config interface takes a u8 flag */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (!ionic->vfs || vf >= pci_num_vf(ionic->pdev)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_TRUST, &data);
	if (!ret)
		ionic->vfs[vf].trusted = data;

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
2592
ionic_set_vf_link_state(struct net_device * netdev,int vf,int set)2593 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2594 {
2595 struct ionic_lif *lif = netdev_priv(netdev);
2596 struct ionic *ionic = lif->ionic;
2597 u8 data;
2598 int ret;
2599
2600 switch (set) {
2601 case IFLA_VF_LINK_STATE_ENABLE:
2602 data = IONIC_VF_LINK_STATUS_UP;
2603 break;
2604 case IFLA_VF_LINK_STATE_DISABLE:
2605 data = IONIC_VF_LINK_STATUS_DOWN;
2606 break;
2607 case IFLA_VF_LINK_STATE_AUTO:
2608 data = IONIC_VF_LINK_STATUS_AUTO;
2609 break;
2610 default:
2611 return -EINVAL;
2612 }
2613
2614 if (!netif_device_present(netdev))
2615 return -EBUSY;
2616
2617 down_write(&ionic->vf_op_lock);
2618
2619 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2620 ret = -EINVAL;
2621 } else {
2622 ret = ionic_set_vf_config(ionic, vf,
2623 IONIC_VF_ATTR_LINKSTATE, &data);
2624 if (!ret)
2625 ionic->vfs[vf].linkstate = set;
2626 }
2627
2628 up_write(&ionic->vf_op_lock);
2629 return ret;
2630 }
2631
/* netdev callbacks provided by this driver */
static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_eth_ioctl		= ionic_eth_ioctl,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};
2655
/* Exchange the ring and descriptor resources of two qcqs.  Used by
 * queue reconfig to slide freshly allocated rings in under the
 * existing qcq shells while interrupt/napi setup stays put.
 */
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.features, b->q.features);
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.desc_size, b->q.desc_size);
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);

	/* scatter-gather descriptor areas */
	swap(a->q.sg_desc_size, b->q.sg_desc_size);
	swap(a->q.sg_base, b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base, b->sg_base);
	swap(a->sg_base_pa, b->sg_base_pa);
	swap(a->sg_size, b->sg_size);

	/* completion queue */
	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.desc_size, b->cq.desc_size);
	swap(a->cq.base, b->cq.base);
	swap(a->cq.base_pa, b->cq.base_pa);
	swap(a->cq.info, b->cq.info);
	swap(a->cq_base, b->cq_base);
	swap(a->cq_base_pa, b->cq_base_pa);
	swap(a->cq_size, b->cq_size);

	/* refresh a's debugfs entries to reflect its new rings */
	ionic_debugfs_del_qcq(a);
	ionic_debugfs_add_qcq(a->q.lif, a);
}
2688
/* Replace the lif's queue setup with one matching qparam: new ring
 * sizes, queue count, rx features, and/or interrupt layout.  The new
 * rings are allocated first while traffic still runs on the old ones;
 * only then are the queues stopped, the rings swapped in under the
 * existing qcq shells, and the queues restarted.  On error the old
 * configuration is restarted and the partial allocations are freed.
 */
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int flags, i;
	int err = 0;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}
	if (qparam->nxqs != lif->nxqs ||
	    qparam->nrxq_descs != lif->nrxq_descs ||
	    qparam->rxq_features != lif->rxq_features) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (tx_qcqs) {
		num_desc = qparam->ntxq_descs;
		desc_sz = sizeof(struct ionic_txq_desc);
		comp_sz = sizeof(struct ionic_txq_comp);

		/* use the v1 SG descriptor layout if the device advertises it */
		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
		    sizeof(struct ionic_txq_sg_desc_v1))
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
		else
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		num_desc = qparam->nrxq_descs;
		desc_sz = sizeof(struct ionic_rxq_desc);
		comp_sz = sizeof(struct ionic_rxq_comp);
		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

		/* some rx features need double-sized completion descriptors */
		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
			comp_sz *= 2;

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;

			rx_qcqs[i]->q.features = qparam->rxq_features;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			/* restore the tx count so tx/rx stay in step */
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			/* shared mode: tx inherits rx coalescing */
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);
	swap(lif->rxq_features, qparam->rxq_features);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		if (lif->txqcqs && lif->txqcqs[i]) {
			lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, lif->txqcqs[i]);
		}

		if (lif->rxqcqs && lif->rxqcqs[i]) {
			lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, lif->rxqcqs[i]);
		}
	}

	if (err)
		netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);

	return err;
}
2903
/* Allocate the netdev/lif and its supporting resources: identity
 * info, deferred-work machinery, DMA'd lif info area, control and
 * txrx qcq arrays, the RSS indirection table, and the optional PHC.
 * Unwound in reverse order via the goto chain on failure.
 */
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	/* NOTE(review): the rx count reuses ntxqs_per_lif — tx/rx queue
	 * counts appear to be kept equal throughout; confirm.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	/* derive the MTU limits from the device's frame size limits */
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;

	/* use minimal rings when running as a crash-dump kernel */
	if (is_kdump_kernel()) {
		lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
		lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
	} else {
		lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
		lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	}

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_alloc_phc(lif);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}
3030
ionic_lif_reset(struct ionic_lif * lif)3031 static void ionic_lif_reset(struct ionic_lif *lif)
3032 {
3033 struct ionic_dev *idev = &lif->ionic->idev;
3034
3035 mutex_lock(&lif->ionic->dev_cmd_lock);
3036 ionic_dev_cmd_lif_reset(idev, lif->index);
3037 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3038 mutex_unlock(&lif->ionic->dev_cmd_lock);
3039 }
3040
/* Handle a firmware-down event: detach the netdev, stop and free the
 * queues, deinit the LIF, and reset the device.  The FW_RESET bit is
 * set first so this runs at most once per outage, and everything is
 * left ready to be rebuilt by ionic_lif_handle_fw_up().
 */
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	mutex_lock(&lif->queue_lock);
	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		ionic_stop_queues(lif);
	}

	/* only tear down the data path if the interface was open */
	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	mutex_unlock(&lif->queue_lock);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
3070
/* Handle a firmware-up event: re-identify the device and port, then
 * rebuild everything that ionic_lif_handle_fw_down() tore down —
 * qcqs, LIF init, rx filters, and (if the interface was open) the
 * txrx queues — before clearing FW_RESET and re-attaching the netdev.
 * On failure the partial rebuild is unwound and the error logged.
 */
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	/* nothing to do unless we were told the FW went down */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;

	mutex_lock(&lif->queue_lock);

	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		dev_info(ionic->dev, "FW Up: clearing broken state\n");

	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	/* reprogram the rx filters that were cached across the outage */
	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	mutex_unlock(&lif->queue_lock);

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	/* restore the hardware timestamping queues */
	ionic_lif_hwstamp_replay(lif);

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
3143
/* Final release of all LIF resources: PHC, RSS indirection table,
 * queues, identity/info buffers, doorbell page, and the netdev itself.
 */
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	ionic_lif_free_phc(lif);

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	/* don't poke a dead FW with a reset command during recovery */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}
3177
/* Undo ionic_lif_init(): stop deferred work, drop rx filters and RSS,
 * quiesce the admin/notify queues, and reset the LIF in the device.
 */
void ionic_lif_deinit(struct ionic_lif *lif)
{
	/* tear down only once */
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	/* skip the FW-interacting cleanup if the FW has gone away */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	/* notifyq shares the adminq's napi, so one napi_disable covers both */
	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->config_lock);
	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}
3199
/* Bring up the admin queue via the dev_cmd interface (the adminq itself
 * is not available yet), then enable its napi and unmask its interrupt.
 * Returns 0 on success or a negative error code.
 */
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	/* pull the completion out of the dev_cmd register space */
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	/* record the hardware's queue identity for doorbell rings */
	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	/* unmask the interrupt so adminq completions can flow */
	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
3243
/* Bring up the notify (event) queue using an adminq command; the
 * notifyq piggybacks on the adminq's interrupt (see .intr_index).
 * Returns 0 on success or a negative error code.
 */
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			/* share the adminq's interrupt vector */
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* restart event id tracking and record hw queue identity */
	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
3292
/* Establish the netdev's station MAC address: read the device's MAC,
 * generate and program a random one if the device has none, reconcile
 * with any previously-set netdev address after a fw-upgrade, and add
 * the resulting address to the rx filter list.
 */
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	u8 mac_address[ETH_ALEN];
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);

	/* device has no MAC - make one up and program it into the NIC */
	if (is_zero_ether_addr(mac_address)) {
		eth_hw_addr_random(netdev);
		netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
		ether_addr_copy(mac_address, netdev->dev_addr);

		err = ionic_program_mac(lif, mac_address);
		if (err < 0)
			return err;

		/* positive return indicates old FW that can't confirm
		 * the programmed address; treat as done
		 */
		if (err > 0) {
			netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
				   __func__);
			return 0;
		}
	}

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(mac_address, netdev->dev_addr))
			ionic_lif_addr_add(lif, netdev->dev_addr);
	} else {
		/* Update the netdev mac with the device's mac */
		ether_addr_copy(addr.sa_data, mac_address);
		/* NOTE(review): sa_family is set to AF_INET here even though
		 * this is an Ethernet address change - presumably any non-zero
		 * family satisfies eth_prepare_mac_addr_change; verify.
		 */
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr_add(lif, netdev->dev_addr);

	return 0;
}
3359
/* Initialize the LIF in the device: issue LIF_INIT via dev_cmd, map the
 * kernel doorbell page, bring up the admin and notify queues, program
 * NIC features, rx filters, and the station MAC.  Errors unwind through
 * the goto chain.  Returns 0 on success or a negative error code.
 */
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);
	mutex_init(&lif->config_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/* notifyq is optional; skip if the device offers none */
	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	/* filters persist across a fw reset, so only init them fresh */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}
3450
/* Intentionally empty: nb_work currently has nothing to do, but the
 * work item must exist so ionic_lif_unregister() can cancel_work_sync()
 * it safely.
 */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
3454
/* Push the current netdev name to the device via LIF_SETATTR so FW-side
 * tooling can show the host's interface name.  Best-effort: the adminq
 * result is deliberately ignored.
 */
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}
3471
ionic_netdev_lif(struct net_device * netdev)3472 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3473 {
3474 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3475 return NULL;
3476
3477 return netdev_priv(netdev);
3478 }
3479
ionic_lif_notify(struct notifier_block * nb,unsigned long event,void * info)3480 static int ionic_lif_notify(struct notifier_block *nb,
3481 unsigned long event, void *info)
3482 {
3483 struct net_device *ndev = netdev_notifier_info_to_dev(info);
3484 struct ionic *ionic = container_of(nb, struct ionic, nb);
3485 struct ionic_lif *lif = ionic_netdev_lif(ndev);
3486
3487 if (!lif || lif->ionic != ionic)
3488 return NOTIFY_DONE;
3489
3490 switch (event) {
3491 case NETDEV_CHANGENAME:
3492 ionic_lif_set_netdev_info(lif);
3493 break;
3494 }
3495
3496 return NOTIFY_DONE;
3497 }
3498
/* Register the LIF with the kernel: PHC, netdevice notifier, and the
 * netdev itself, then kick off an initial link check and push the
 * netdev name to the FW.  Returns 0 or a negative error code.
 */
int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	ionic_lif_register_phc(lif);

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	/* notifier registration is best-effort; a NULL notifier_call
	 * tells ionic_lif_unregister() there is nothing to undo
	 */
	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		/* NOTE(review): the notifier registered above is left in
		 * place on this path - presumably the caller's unwind ends
		 * in ionic_lif_unregister(), which removes it; verify.
		 */
		ionic_lif_unregister_phc(lif);
		return err;
	}

	ionic_link_status_check_request(lif, CAN_SLEEP);
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}
3527
/* Undo ionic_lif_register(): remove the netdevice notifier (if it was
 * successfully registered), unregister the netdev, and drop the PHC.
 */
void ionic_lif_unregister(struct ionic_lif *lif)
{
	/* notifier_call is NULL if registration failed or was undone */
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	/* guard against a netdev that never finished registration */
	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);

	ionic_lif_unregister_phc(lif);

	lif->registered = false;
}
3543
/* Query the device for the capabilities of each supported queue type
 * and cache them in lif->qtype_info[].  Per-qtype -EINVAL just means
 * that type isn't supported; -EIO or other errors abort the whole scan
 * (e.g. older FW without the q_ident command).
 */
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	/* the identify results land in the dev_cmd data window */
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		/* read the result registers while still holding the lock */
		if (!err) {
			qti->version   = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features  = readq(&q_ident->features);
			qti->desc_sz   = readw(&q_ident->desc_sz);
			qti->comp_sz   = readw(&q_ident->comp_sz);
			qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
3617
/* Fetch the LIF identity data for @lif_type from the device into @lid
 * and log the interesting fields.  Returns 0 or a negative error code.
 */
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t nbytes;
	int err;

	/* never copy more than the dev_cmd data window holds */
	nbytes = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, nbytes);
	mutex_unlock(&ionic->dev_cmd_lock);

	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}
3659
/* Work out how many queues and interrupt vectors this device gets.
 * Starts from the FW-advertised maximums (clamped hard under kdump),
 * then iteratively halves notifyq, eq, and txrx counts until the OS
 * can supply enough MSI vectors.  Returns 0 or -ENOSPC/other error.
 */
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* retrieve basic values from FW */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* limit values to play nice with kdump */
	if (is_kdump_kernel()) {
		dev_nintrs = 2;
		neqs_per_lif = 0;
		nnqs_per_lif = 0;
		ntxqs_per_lif = 1;
		nrxqs_per_lif = 1;
	}

	/* reserve last queue id for hardware timestamping */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		/* can't spare a queue pair - drop the feature instead */
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	/* no point in more queue pairs than online CPUs */
	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2; /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	/* a partial vector grant isn't usable - give back and retry smaller */
	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	/* halve the most expendable resource first: notifyqs, then eqs,
	 * then txrx queue pairs; give up when all are down to one
	 */
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}
3755