// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "tx_common.h"
#include "ef100_netdev.h"
#include "ef100_ethtool.h"
#include "nic_common.h"
#include "ef100_nic.h"
#include "ef100_tx.h"
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"
#include "tc_encap_actions.h"
#include "efx_devlink.h"

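/* Keep efx->name in sync with the current netdev name. */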
static void ef100_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
}

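/* Ask the MC for enough virtual interfaces (VIs) to back every RX and TX
 * queue (including XDP TX).  The number actually granted is returned via
 * @allocated_vis; -EAGAIN means fewer than the maximum were available.
 */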
static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
	/* EF100 uses a single TXQ per channel, as all checksum offloading
	 * is configured in the TX descriptor, and there is no TX Pacer for
	 * HIGHPRI queues.
	 */
	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
	unsigned int rx_vis = efx->n_rx_channels;
	unsigned int min_vis, max_vis;
	int rc;

	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

	max_vis = max(rx_vis, tx_vis);
	/* We require at least a single complete TX channel worth of queues. */
	min_vis = efx->tx_queues_per_channel;

	rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis,
				NULL, allocated_vis);

	/* We retry allocating VIs by reallocating channels when we have not
	 * been able to allocate the maximum VIs.
	 */
	if (!rc && *allocated_vis < max_vis)
		rc = -EAGAIN;

	return rc;
}

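/* Extend the uncacheable mapping of the memory BAR so that it covers the
 * per-VI register regions of all @max_vis VIs.
 */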
static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
	unsigned int uc_mem_map_size;
	void __iomem *membase;

	efx->max_vis = max_vis;
	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

	/* Extend the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not extend memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

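	/* Quiesce the datapath and tear everything down in roughly the
	 * reverse order to ef100_net_open().
	 */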
	efx_detach_reps(efx);
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	efx->state = STATE_NET_DOWN;

	return 0;
}

/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc && rc != -EAGAIN)
		goto fail;

	/* Try one more time but with the maximum number of channels
	 * equal to the allocated VIs, which would more likely succeed.
	 */
	if (rc == -EAGAIN) {
		rc = efx_mcdi_free_vis(efx);
		if (rc)
			goto fail;

		efx_remove_interrupts(efx);
		efx->max_channels = allocated_vis;

		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail;

		rc = efx_set_channels(efx);
		if (rc)
			goto fail;

		rc = ef100_alloc_vis(efx, &allocated_vis);
		if (rc && rc != -EAGAIN)
			goto fail;

		/* It should be very unlikely that we failed here again, but in
		 * such a case we return ENOSPC.
		 */
		if (rc == -EAGAIN) {
			rc = -ENOSPC;
			goto fail;
		}
	}

	rc = efx_probe_channels(efx);
	if (rc)
		return rc;

	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* in case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->state = STATE_NET_UP;
	if (netif_running(efx->net_dev))
		efx_attach_reps(efx);

	return 0;

fail:
	ef100_net_stop(net_dev);
	return rc;
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
}

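/* Common transmit path, also used by port representors.  @efv identifies the
 * representor originating the skb, or is NULL when transmitting on this
 * netdev's own queues (as in ef100_hard_start_xmit() above).
 */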
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
				    struct efx_nic *efx,
				    struct net_device *net_dev,
				    struct efx_rep *efv)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int rc;

	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
	netif_vdbg(efx, tx_queued, efx->net_dev,
		   "%s len %d data %d channel %d\n", __func__,
		   skb->len, skb->data_len, channel->channel);
	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
		netif_stop_queue(net_dev);
		dev_kfree_skb_any(skb);
		goto err;
	}

	tx_queue = &channel->tx_queue[0];
	rc = __ef100_enqueue_skb(tx_queue, skb, efv);
	if (rc == 0)
		return NETDEV_TX_OK;

err:
	net_dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops ef100_netdev_ops = {
	.ndo_open               = ef100_net_open,
	.ndo_stop               = ef100_net_stop,
	.ndo_start_xmit         = ef100_hard_start_xmit,
	.ndo_tx_timeout         = efx_watchdog,
	.ndo_get_stats64        = efx_net_stats,
	.ndo_change_mtu         = efx_change_mtu,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = efx_set_mac_address,
	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
	.ndo_set_features       = efx_set_features,
	.ndo_get_phys_port_id   = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer      = efx_filter_rfs,
#endif
#ifdef CONFIG_SFC_SRIOV
	.ndo_setup_tc           = efx_tc_setup,
#endif
};

/* Netdev registration
 */
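/* Netdev notifier: keeps our name in sync when this device is registered or
 * renamed, and forwards events to the TC offload code when this function has
 * use of the MAE (grp_mae).
 */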
int ef100_netdev_event(struct notifier_block *this,
		       unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
	struct ef100_nic_data *nic_data = efx->nic_data;
	int err;

	if (efx->net_dev == net_dev &&
	    (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
		ef100_update_name(efx);

	if (!nic_data->grp_mae)
		return NOTIFY_DONE;
	err = efx_tc_netdev_event(efx, event, net_dev);
	if (err & NOTIFY_STOP_MASK)
		return err;

	return NOTIFY_DONE;
}

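/* Netevent notifier: forwards events (e.g. neighbour updates) to the TC
 * offload code when this function has use of the MAE.
 */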
static int ef100_netevent_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netevent_notifier);
	struct ef100_nic_data *nic_data = efx->nic_data;
	int err;

	if (!nic_data->grp_mae)
		return NOTIFY_DONE;
	err = efx_tc_netevent_event(efx, event, ptr);
	if (err & NOTIFY_STOP_MASK)
		return err;

	return NOTIFY_DONE;
}

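/* Fill in the remaining net_device fields (ops, MTU limits, ethtool) and
 * register it with the kernel.  The interface starts with carrier off until
 * PHY events report a link.
 */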
static int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_NET_DOWN;
	rtnl_unlock();
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

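/* Undo ef100_register_netdev().  Safe to call even if registration never
 * completed, since it checks efx_dev_registered() first.
 */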
static void ef100_unregister_netdev(struct efx_nic *efx)
{
	if (efx_dev_registered(efx)) {
		efx_fini_mcdi_logging(efx);
		efx->state = STATE_PROBED;
		unregister_netdev(efx->net_dev);
	}
}

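/* Tear down everything set up by ef100_probe_netdev(): close the device,
 * unregister the notifiers, disable SR-IOV, remove devlink/TC state, the
 * filter table and channels, then free the net_device itself.
 */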
void ef100_remove_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;

	if (!efx->net_dev)
		return;

	rtnl_lock();
	dev_close(efx->net_dev);
	rtnl_unlock();

	unregister_netdevice_notifier(&efx->netdev_notifier);
	unregister_netevent_notifier(&efx->netevent_notifier);
#if defined(CONFIG_SFC_SRIOV)
	if (!efx->type->is_vf)
		efx_ef100_pci_sriov_disable(efx, true);
#endif

	efx_fini_devlink_lock(efx);
	ef100_unregister_netdev(efx);

#ifdef CONFIG_SFC_SRIOV
	ef100_pf_unset_devlink_port(efx);
	efx_fini_tc(efx);
#endif

	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;

	efx_fini_devlink_and_unlock(efx);

	free_netdev(efx->net_dev);
	efx->net_dev = NULL;
	efx->state = STATE_PROBED;
}

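/* Allocate and register the net_device for one PCI function, probing the
 * datapath capabilities, PHY, channels and filter table along the way.  The
 * MCDI interface must already be up, since we query the function flags and
 * MAC address here.
 */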
int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;
	struct efx_probe_data **probe_ptr;
	struct ef100_nic_data *nic_data;
	struct net_device *net_dev;
	int rc;

	if (efx->mcdi->fn_flags &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		pci_info(efx->pci_dev, "No network port on this PCI function");
		return 0;
	}

	/* Allocate and initialise a struct net_device */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
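	/* The netdev private area holds only a pointer back to probe_data
	 * (hence sizeof(probe_data), a pointer, above); efx_netdev_priv()
	 * follows it to reach the struct efx_nic.
	 */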
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);

	/* enable all supported features except rx-fcs and rx-all */
	net_dev->features |= efx->type->offload_features &
			     ~(NETIF_F_RXFCS | NETIF_F_RXALL);
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
	netif_set_tso_max_segs(net_dev,
			       ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
	efx->mdio.dev = net_dev;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	nic_data = efx->nic_data;
	rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
				   efx->type->is_vf);
	if (rc)
		return rc;
	/* Assign MAC address */
	eth_hw_addr_set(net_dev, net_dev->perm_addr);
	ether_addr_copy(nic_data->port_id, net_dev->perm_addr);

	/* devlink creation, registration and lock */
	rc = efx_probe_devlink_and_lock(efx);
	if (rc)
		pci_info(efx->pci_dev, "devlink registration failed");

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	if (!efx->type->is_vf) {
		rc = ef100_probe_netdev_pf(efx);
		if (rc)
			goto fail;
#ifdef CONFIG_SFC_SRIOV
		ef100_pf_set_devlink_port(efx);
#endif
	}

	efx->netdev_notifier.notifier_call = ef100_netdev_event;
	rc = register_netdevice_notifier(&efx->netdev_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netdevice notifier, rc=%d\n", rc);
		goto fail;
	}

	efx->netevent_notifier.notifier_call = ef100_netevent_event;
	rc = register_netevent_notifier(&efx->netevent_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netevent notifier, rc=%d\n", rc);
		goto fail;
	}

	efx_probe_devlink_unlock(efx);
	return rc;
fail:
#ifdef CONFIG_SFC_SRIOV
	/* remove the devlink port if it exists */
	ef100_pf_unset_devlink_port(efx);
#endif
	efx_probe_devlink_unlock(efx);
	return rc;
}