// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/netdev_queues.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_txrx.h"

int __fbnic_open(struct fbnic_net *fbn)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	err = fbnic_alloc_napi_vectors(fbn);
	if (err)
		return err;

	err = fbnic_alloc_resources(fbn);
	if (err)
		goto free_napi_vectors;

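	/* Tell the stack how many Tx/Rx queues are actually in use before
	 * the interface is brought up.
	 */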
	err = netif_set_real_num_tx_queues(fbn->netdev,
					   fbn->num_tx_queues);
	if (err)
		goto free_resources;

	err = netif_set_real_num_rx_queues(fbn->netdev,
					   fbn->num_rx_queues);
	if (err)
		goto free_resources;

	/* Send ownership message and flush to verify FW has seen it */
	err = fbnic_fw_xmit_ownership_msg(fbd, true);
	if (err) {
		dev_warn(fbd->dev,
			 "Error %d sending host ownership message to the firmware\n",
			 err);
		goto free_resources;
	}

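	/* Initialize FW heartbeat monitoring so an unresponsive firmware
	 * can be detected while the host owns the device.
	 */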
	err = fbnic_fw_init_heartbeat(fbd, false);
	if (err)
		goto release_ownership;

	err = fbnic_pcs_irq_enable(fbd);
	if (err)
		goto release_ownership;
	/* Pull the BMC config and initialize the RPC */
	fbnic_bmc_rpc_init(fbd);
	fbnic_rss_reinit(fbd, fbn);

	phylink_resume(fbn->phylink);

	return 0;
release_ownership:
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
free_resources:
	fbnic_free_resources(fbn);
free_napi_vectors:
	fbnic_free_napi_vectors(fbn);
	return err;
}

static int fbnic_open(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int err;

	err = __fbnic_open(fbn);
	if (!err)
		fbnic_up(fbn);

	return err;
}

static int fbnic_stop(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

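	/* Suspend phylink; if a BMC is present the link is left up for it
	 * while the host interface goes down.
	 */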
	phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));

	fbnic_down(fbn);
	fbnic_pcs_irq_disable(fbn->fbd);

	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);

	fbnic_free_resources(fbn);
	fbnic_free_napi_vectors(fbn);

	return 0;
}

static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_addr *avail_addr;

	if (WARN_ON(!is_valid_ether_addr(addr)))
		return -EADDRNOTAVAIL;

	avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
	if (!avail_addr)
		return -ENOSPC;

	/* Add type flag indicating this address is in use by the host */
	set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);

	return 0;
}

static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i, ret;

	/* Scan from middle of list to bottom, filling bottom up.
	 * Skip the first entry which is reserved for dev_addr and
	 * leave the last entry to use for promiscuous filtering.
	 */
	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
	     i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		ret = __fbnic_uc_unsync(mac_addr);
	}

	return ret;
}

static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_addr *avail_addr;

	if (WARN_ON(!is_multicast_ether_addr(addr)))
		return -EADDRNOTAVAIL;

	avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
	if (!avail_addr)
		return -ENOSPC;

	/* Add type flag indicating this address is in use by the host */
	set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);

	return 0;
}

static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i, ret;

	/* Scan from middle of list to top, filling top down.
	 * Skip over the address reserved for the BMC MAC and
	 * exclude index 0 as that belongs to the broadcast address
	 */
	for (i = fbd->mac_addr_boundary, ret = -ENOENT;
	     --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		ret = __fbnic_mc_unsync(mac_addr);
	}

	return ret;
}

void __fbnic_set_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	bool uc_promisc = false, mc_promisc = false;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_mac_addr *mac_addr;
	int err;

	/* Populate host address from dev_addr */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
	if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
	    mac_addr->state != FBNIC_TCAM_S_VALID) {
		ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
		set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
	}

	/* Populate broadcast address if broadcast is enabled */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	if (netdev->flags & IFF_BROADCAST) {
		if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_broadcast_addr(mac_addr->value.addr8);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
	}

	/* Synchronize unicast and multicast address lists */
	err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
	if (err == -ENOSPC)
		uc_promisc = true;
	err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
	if (err == -ENOSPC)
		mc_promisc = true;

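	/* Fold in the interface flags: IFF_PROMISC forces unicast
	 * promiscuous, and unicast promiscuous implies multicast
	 * promiscuous as well.
	 */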
	uc_promisc |= !!(netdev->flags & IFF_PROMISC);
	mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

	/* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
	if (uc_promisc) {
		if (!is_zero_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			set_bit(FBNIC_MAC_ADDR_T_PROMISC,
				mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mc_promisc &&
		   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
		/* Multicast needs special handling because the BMC may
		 * already have an all-multi rule in place. Adding one of
		 * our own would do no good in that case, so instead the
		 * BMC's ALL MULTI rule is modified below when it exists.
		 */
		if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
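			/* Flip the I/G bit in both the value and the mask so
			 * only the multicast bit is significant and the entry
			 * matches any multicast destination address.
			 */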
			mac_addr->value.addr8[0] ^= 1;
			mac_addr->mask.addr8[0] ^= 1;
			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
		} else {
			mac_addr->state = FBNIC_TCAM_S_DELETE;
		}
	}

	/* Add rules for BMC all multicast if it is enabled */
	fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

	/* Sift out any unshared BMC rules and place them in BMC only section */
	fbnic_sift_macda(fbd);

	/* Write updates to hardware */
	fbnic_write_rules(fbd);
	fbnic_write_macda(fbd);
}

static void fbnic_set_rx_mode(struct net_device *netdev)
{
	/* No need to update the hardware if we are not running */
	if (netif_running(netdev))
		__fbnic_set_rx_mode(netdev);
}

static int fbnic_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	fbnic_set_rx_mode(netdev);

	return 0;
}

void fbnic_clear_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];

		if (mac_addr->state != FBNIC_TCAM_S_VALID)
			continue;

		bitmap_clear(mac_addr->act_tcam,
			     FBNIC_MAC_ADDR_T_HOST_START,
			     FBNIC_MAC_ADDR_T_HOST_LEN);

		if (bitmap_empty(mac_addr->act_tcam,
				 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
			mac_addr->state = FBNIC_TCAM_S_DELETE;
	}

	/* Write updates to hardware */
	fbnic_write_macda(fbd);

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

static void fbnic_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats64)
{
	u64 tx_bytes, tx_packets, tx_dropped = 0;
	u64 rx_bytes, rx_packets, rx_dropped = 0;
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_queue_stats *stats;
	unsigned int start, i;

	stats = &fbn->tx_stats;

	tx_bytes = stats->bytes;
	tx_packets = stats->packets;
	tx_dropped = stats->dropped;

	stats64->tx_bytes = tx_bytes;
	stats64->tx_packets = tx_packets;
	stats64->tx_dropped = tx_dropped;

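	/* Aggregate per-ring counters on top of the base stats. Each ring
	 * snapshot is retried via the u64_stats sequence counter so the
	 * 64-bit values stay consistent even where they cannot be read
	 * atomically.
	 */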
	for (i = 0; i < fbn->num_tx_queues; i++) {
		struct fbnic_ring *txr = fbn->tx[i];

		if (!txr)
			continue;

		stats = &txr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_bytes = stats->bytes;
			tx_packets = stats->packets;
			tx_dropped = stats->dropped;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->tx_bytes += tx_bytes;
		stats64->tx_packets += tx_packets;
		stats64->tx_dropped += tx_dropped;
	}

	stats = &fbn->rx_stats;

	rx_bytes = stats->bytes;
	rx_packets = stats->packets;
	rx_dropped = stats->dropped;

	stats64->rx_bytes = rx_bytes;
	stats64->rx_packets = rx_packets;
	stats64->rx_dropped = rx_dropped;

	for (i = 0; i < fbn->num_rx_queues; i++) {
		struct fbnic_ring *rxr = fbn->rx[i];

		if (!rxr)
			continue;

		stats = &rxr->stats;
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_bytes = stats->bytes;
			rx_packets = stats->packets;
			rx_dropped = stats->dropped;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		stats64->rx_bytes += rx_bytes;
		stats64->rx_packets += rx_packets;
		stats64->rx_dropped += rx_dropped;
	}
}

static const struct net_device_ops fbnic_netdev_ops = {
	.ndo_open		= fbnic_open,
	.ndo_stop		= fbnic_stop,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fbnic_xmit_frame,
	.ndo_features_check	= fbnic_features_check,
	.ndo_set_mac_address	= fbnic_set_mac,
	.ndo_set_rx_mode	= fbnic_set_rx_mode,
	.ndo_get_stats64	= fbnic_get_stats64,
};

static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_rx *rx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *rxr = fbn->rx[idx];
	struct fbnic_queue_stats *stats;
	unsigned int start;
	u64 bytes, packets;

	if (!rxr)
		return;

	stats = &rxr->stats;
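	/* Loop until a consistent snapshot is read; the sequence count
	 * changes if the ring updated its counters mid-read.
	 */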
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	rx->bytes = bytes;
	rx->packets = packets;
}

static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *txr = fbn->tx[idx];
	struct fbnic_queue_stats *stats;
	unsigned int start;
	u64 bytes, packets;

	if (!txr)
		return;

	stats = &txr->stats;
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	tx->bytes = bytes;
	tx->packets = packets;
}

static void fbnic_get_base_stats(struct net_device *dev,
				 struct netdev_queue_stats_rx *rx,
				 struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);

	tx->bytes = fbn->tx_stats.bytes;
	tx->packets = fbn->tx_stats.packets;

	rx->bytes = fbn->rx_stats.bytes;
	rx->packets = fbn->rx_stats.packets;
}

static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx	= fbnic_get_queue_stats_rx,
	.get_queue_stats_tx	= fbnic_get_queue_stats_tx,
	.get_base_stats		= fbnic_get_base_stats,
};

void fbnic_reset_queues(struct fbnic_net *fbn,
			unsigned int tx, unsigned int rx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	unsigned int max_napis;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;

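	/* Limit the requested queue counts to the NAPI vectors available;
	 * Tx and Rx queues share vectors, so max(tx, rx) of them are used.
	 */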
	tx = min(tx, max_napis);
	fbn->num_tx_queues = tx;

	rx = min(rx, max_napis);
	fbn->num_rx_queues = rx;

	fbn->num_napi = max(tx, rx);
}

/**
 * fbnic_netdev_free - Free the netdev associated with fbnic
 * @fbd: Driver specific structure to free netdev from
 *
 * Destroy the phylink instance and free the netdev along with its private
 * structure, releasing the binding between the hardware and the netdev.
 **/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	if (fbn->phylink)
		phylink_destroy(fbn->phylink);

	free_netdev(fbd->netdev);
	fbd->netdev = NULL;
}

/**
 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
 * @fbd: Driver specific structure to associate netdev with
 *
 * Allocate and initialize the netdev and netdev private structure. Bind
 * together the hardware, netdev, and pci data structures.
 *
 * Return: Pointer to the allocated netdev on success, NULL on failure
 **/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
	struct net_device *netdev;
	struct fbnic_net *fbn;
	int default_queues;

	netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
	if (!netdev)
		return NULL;

	SET_NETDEV_DEV(netdev, fbd->dev);
	fbd->netdev = netdev;

	netdev->netdev_ops = &fbnic_netdev_ops;
	netdev->stat_ops = &fbnic_stat_ops;

	fbnic_set_ethtool_ops(netdev);

	fbn = netdev_priv(netdev);

	fbn->netdev = netdev;
	fbn->fbd = fbd;
	INIT_LIST_HEAD(&fbn->napis);

	fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
	fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
	fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
	fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;

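	/* Default the queue count to what the stack suggests for RSS,
	 * bounded by what the device can actually provide.
	 */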
	default_queues = netif_get_num_default_rss_queues();
	if (default_queues > fbd->max_num_queues)
		default_queues = fbd->max_num_queues;

	fbnic_reset_queues(fbn, default_queues, default_queues);

	fbnic_reset_indir_tbl(fbn);
	fbnic_rss_key_fill(fbn->rss_key);
	fbnic_rss_init_en_mask(fbn);

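	/* The MACDA TCAM can filter additional unicast addresses, so
	 * advertise unicast filtering rather than falling back to
	 * promiscuous mode for secondary addresses.
	 */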
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->features |=
		NETIF_F_RXHASH |
		NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
	netdev->hw_enc_features |= netdev->features;

	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* TBD: This is a workaround for the BMC as phylink doesn't have
	 * support for leaving the link enabled if a BMC is present.
	 */
	netdev->ethtool->wol_enabled = true;

	fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
	fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
	netif_carrier_off(netdev);

	netif_tx_stop_all_queues(netdev);

	if (fbnic_phylink_init(netdev)) {
		fbnic_netdev_free(fbd);
		return NULL;
	}

	return netdev;
}

static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
{
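	/* Build the MAC from the PCIe Device Serial Number: keep the OUI
	 * from the upper three bytes and the NIC-specific value from the
	 * lower three, dropping the two middle bytes (typically the 0xFFFE
	 * EUI-64 filler).
	 */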
	addr[0] = (dsn >> 56) & 0xFF;
	addr[1] = (dsn >> 48) & 0xFF;
	addr[2] = (dsn >> 40) & 0xFF;
	addr[3] = (dsn >> 16) & 0xFF;
	addr[4] = (dsn >> 8) & 0xFF;
	addr[5] = dsn & 0xFF;

	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}

/**
 * fbnic_netdev_register - Initialize general software structures
 * @netdev: Netdev containing structure to initialize and register
 *
 * Initialize the MAC address for the netdev and register it.
 *
 * Return: 0 on success, negative on failure
 **/
int fbnic_netdev_register(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;
	u64 dsn = fbd->dsn;
	u8 addr[ETH_ALEN];
	int err;

	err = fbnic_dsn_to_mac_addr(dsn, addr);
	if (!err) {
		ether_addr_copy(netdev->perm_addr, addr);
		eth_hw_addr_set(netdev, addr);
	} else {
		/* A randomly assigned MAC address will cause provisioning
		 * issues so instead just fail to spawn the netdev and
		 * avoid any confusion.
		 */
		dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
		return err;
	}

	return register_netdev(netdev);
}

void fbnic_netdev_unregister(struct net_device *netdev)
{
	unregister_netdev(netdev);
}