// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_main.c: Main file for aQuantia Linux driver. */

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
#include "aq_vec.h"

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <linux/filter.h>

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);

DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
EXPORT_SYMBOL(aq_xdp_locking_key);

static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;

static const struct net_device_ops aq_ndev_ops;

static struct workqueue_struct *aq_ndev_wq;

void aq_ndev_schedule_work(struct work_struct *work)
{
	queue_work(aq_ndev_wq, work);
}

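/* Allocate the net_device together with its aq_nic_s private area and
 * hook up the netdev and ethtool operation tables.
 */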
struct net_device *aq_ndev_alloc(void)
{
	struct net_device *ndev = NULL;
	struct aq_nic_s *aq_nic = NULL;

	ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
	if (!ndev)
		return NULL;

	aq_nic = netdev_priv(ndev);
	aq_nic->ndev = ndev;
	ndev->netdev_ops = &aq_ndev_ops;
	ndev->ethtool_ops = &aq_ethtool_ops;

	return ndev;
}

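/* Bring the interface up: initialize the NIC, re-apply any stored RX flow
 * rules and VLAN filters, then start the datapath. On failure the NIC is
 * deinitialized again before returning the error.
 */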
int aq_ndev_open(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = aq_nic_init(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_reapply_rxnfc_all_rules(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_filters_vlans_update(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_nic_start(aq_nic);
	if (err < 0) {
		aq_nic_stop(aq_nic);
		goto err_exit;
	}

err_exit:
	if (err < 0)
		aq_nic_deinit(aq_nic, true);

	return err;
}

int aq_ndev_close(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = aq_nic_stop(aq_nic);
	aq_nic_deinit(aq_nic, true);

	return err;
}

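/* Transmit entry point. When the PTP datapath is up, PTP traffic (and any
 * skb marked for hardware timestamping) is diverted to the dedicated PTP
 * ring; all other traffic goes through the regular NIC queues.
 */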
static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
		/* Hardware adds the Timestamp for PTPv2 802.AS1
		 * and PTPv2 IPv4 UDP.
		 * We have to push even general 320 port messages to the ptp
		 * queue explicitly. This is a limitation of current firmware
		 * and hardware PTP design of the chip. Otherwise ptp stream
		 * will fail to sync
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
		    unlikely((ip_hdr(skb)->version == 4) &&
			     (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
			     ((udp_hdr(skb)->dest == htons(319)) ||
			      (udp_hdr(skb)->dest == htons(320)))) ||
		    unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
			return aq_ptp_xmit(aq_nic, skb);
	}
#endif

	skb_tx_timestamp(skb);
	return aq_nic_xmit(aq_nic, skb);
}

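/* MTU changes are refused while a single-buffer (non-frags) XDP program is
 * attached and the resulting frame would no longer fit into one RX buffer.
 */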
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
	int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;
	int err;

	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    new_frame_size > AQ_CFG_RX_FRAME_MAX) {
		netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
			   ndev->mtu);
		return -EOPNOTSUPP;
	}

	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);

	if (err < 0)
		goto err_exit;
	ndev->mtu = new_mtu;

err_exit:
	return err;
}

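/* Apply netdev feature changes: tear down ntuple/VLAN filters when the
 * corresponding offloads are disabled, track LRO, RX checksum and VLAN
 * offload state in the NIC configuration, and restart the interface when
 * LRO or VLAN offload settings change while it is running.
 */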
static int aq_ndev_set_features(struct net_device *ndev,
				netdev_features_t features)
{
	bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
	bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	bool need_ndev_restart = false;
	struct aq_nic_cfg_s *aq_cfg;
	bool is_lro = false;
	int err = 0;

	aq_cfg = aq_nic_get_cfg(aq_nic);

	if (!(features & NETIF_F_NTUPLE)) {
		if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
			err = aq_clear_rxnfc_all_rules(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}
	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			err = aq_filters_vlan_offload_off(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}

	aq_cfg->features = features;

	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
		is_lro = features & NETIF_F_LRO;

		if (aq_cfg->is_lro != is_lro) {
			aq_cfg->is_lro = is_lro;
			need_ndev_restart = true;
		}
	}

	if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) {
		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
							aq_cfg);

		if (unlikely(err))
			goto err_exit;
	}

	if (aq_cfg->is_vlan_rx_strip != is_vlan_rx_strip) {
		aq_cfg->is_vlan_rx_strip = is_vlan_rx_strip;
		need_ndev_restart = true;
	}
	if (aq_cfg->is_vlan_tx_insert != is_vlan_tx_insert) {
		aq_cfg->is_vlan_tx_insert = is_vlan_tx_insert;
		need_ndev_restart = true;
	}

	if (need_ndev_restart && netif_running(ndev)) {
		aq_ndev_close(ndev);
		aq_ndev_open(ndev);
	}

err_exit:
	return err;
}

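/* LRO depends on RX checksum offload and cannot be combined with a
 * single-buffer XDP program, so drop it from the requested feature set in
 * either case.
 */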
static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
					      netdev_features_t features)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    aq_nic->xdp_prog && features & NETIF_F_LRO) {
		netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}

static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = eth_mac_addr(ndev, addr);
	if (err < 0)
		goto err_exit;
	err = aq_nic_set_mac(aq_nic, ndev);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	(void)aq_nic_set_multicast_list(aq_nic, ndev);
}

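/* Hardware timestamp configuration (SIOCSHWTSTAMP/SIOCGHWTSTAMP): TX
 * timestamping can only be switched on or off, and RX filtering accepts
 * only PTPv2 filters (all collapsed to HWTSTAMP_FILTER_PTP_V2_EVENT) or
 * HWTSTAMP_FILTER_NONE.
 */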
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
				   struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_NONE:
		break;
	default:
		return -ERANGE;
	}

	return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
#endif

static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
	struct hwtstamp_config config;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	int ret_val;
#endif

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
	if (ret_val)
		return ret_val;
#endif

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif

static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct aq_nic_s *aq_nic = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return aq_ndev_hwtstamp_set(aq_nic, ifr);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	case SIOCGHWTSTAMP:
		return aq_ndev_hwtstamp_get(aq_nic, ifr);
#endif
	}

	return -EOPNOTSUPP;
}

static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
				  u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	set_bit(vid, aq_nic->active_vlans);

	return aq_filters_vlans_update(aq_nic);
}

static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
				   u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	clear_bit(vid, aq_nic->active_vlans);

	if (-ENOENT == aq_del_fvlan_by_vlan(aq_nic, vid))
		return aq_filters_vlans_update(aq_nic);

	return 0;
}

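/* Validate an mqprio offload request: the TC count must not exceed the
 * hardware limit and must be a power of two, and minimum TX rate limiting
 * is only available on chips with the ANTIGUA feature.
 */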
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
				  struct tc_mqprio_qopt_offload *mqprio,
				  const unsigned int num_tc)
{
	const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
	const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
					   AQ_CFG_TCS_MAX);

	if (num_tc > tcs_max) {
		netdev_err(self->ndev, "Too many TCs requested\n");
		return -EOPNOTSUPP;
	}

	if (num_tc != 0 && !is_power_of_2(num_tc)) {
		netdev_err(self->ndev, "TC count should be power of 2\n");
		return -EOPNOTSUPP;
	}

	if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		netdev_err(self->ndev, "Min tx rate is not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

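/* mqprio offload handler. Per-TC minimum/maximum rates supplied by the
 * stack are scaled by AQ_MBPS_DIVISOR before being programmed, then the TC
 * count and priority map are handed to the NIC layer.
 */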
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	bool has_min_rate;
	bool has_max_rate;
	int err;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
	if (err)
		return err;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		if (has_max_rate) {
			u64 max_rate = mqprio->max_rate[i];

			do_div(max_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
		}

		if (has_min_rate) {
			u64 min_rate = mqprio->min_rate[i];

			do_div(min_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
		}
	}

	return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
				      mqprio->qopt.prio_tc_map);
}

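/* Attach or detach an XDP program. Single-buffer programs require the MTU
 * to fit into one RX frame and force LRO off. The datapath is restarted
 * only when switching between "no program" and "program attached", and the
 * aq_xdp_locking_key static branch is enabled while a program is loaded.
 */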
static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	bool need_update, running = netif_running(ndev);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *old_prog;

	if (prog && !prog->aux->xdp_has_frags) {
		if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "prog does not support XDP frags");
			return -EOPNOTSUPP;
		}

		if (prog && ndev->features & NETIF_F_LRO) {
			netdev_err(ndev,
				   "LRO is not supported with single buffer XDP, disabling\n");
			ndev->features &= ~NETIF_F_LRO;
		}
	}

	need_update = !!aq_nic->xdp_prog != !!prog;
	if (running && need_update)
		aq_ndev_close(ndev);

	old_prog = xchg(&aq_nic->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (!old_prog && prog)
		static_branch_inc(&aq_xdp_locking_key);
	else if (old_prog && !prog)
		static_branch_dec(&aq_xdp_locking_key);

	if (running && need_update)
		return aq_ndev_open(ndev);

	return 0;
}

static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return aq_xdp_setup(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops aq_ndev_ops = {
	.ndo_open = aq_ndev_open,
	.ndo_stop = aq_ndev_close,
	.ndo_start_xmit = aq_ndev_start_xmit,
	.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
	.ndo_change_mtu = aq_ndev_change_mtu,
	.ndo_set_mac_address = aq_ndev_set_mac_address,
	.ndo_set_features = aq_ndev_set_features,
	.ndo_fix_features = aq_ndev_fix_features,
	.ndo_eth_ioctl = aq_ndev_ioctl,
	.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc = aq_ndo_setup_tc,
	.ndo_bpf = aq_xdp,
	.ndo_xdp_xmit = aq_xdp_xmit,
};

static int __init aq_ndev_init_module(void)
{
	int ret;

	aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
	if (!aq_ndev_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	ret = aq_pci_func_register_driver();
	if (ret) {
		destroy_workqueue(aq_ndev_wq);
		return ret;
	}

	return 0;
}

static void __exit aq_ndev_exit_module(void)
{
	aq_pci_func_unregister_driver();

	if (aq_ndev_wq) {
		destroy_workqueue(aq_ndev_wq);
		aq_ndev_wq = NULL;
	}
}

module_init(aq_ndev_init_module);
module_exit(aq_ndev_exit_module);