• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2016~2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
21 #include <net/gre.h>
22 #include <net/vxlan.h>
23 
24 #include "hnae3.h"
25 #include "hns3_enet.h"
26 
27 const char hns3_driver_name[] = "hns3";
28 const char hns3_driver_version[] = VERMAGIC_STRING;
29 static const char hns3_driver_string[] =
30 			"Hisilicon Ethernet Network Driver for Hip08 Family";
31 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32 static struct hnae3_client client;
33 
34 /* hns3_pci_tbl - PCI Device ID Table
35  *
36  * Last entry must be all 0s
37  *
38  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39  *   Class, Class Mask, private data (not used) }
40  */
41 static const struct pci_device_id hns3_pci_tbl[] = {
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
45 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
47 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
48 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
49 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
51 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
53 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54 	/* required last entry */
55 	{0, }
56 };
57 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
58 
hns3_irq_handle(int irq,void * dev)59 static irqreturn_t hns3_irq_handle(int irq, void *dev)
60 {
61 	struct hns3_enet_tqp_vector *tqp_vector = dev;
62 
63 	napi_schedule(&tqp_vector->napi);
64 
65 	return IRQ_HANDLED;
66 }
67 
hns3_nic_uninit_irq(struct hns3_nic_priv * priv)68 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
69 {
70 	struct hns3_enet_tqp_vector *tqp_vectors;
71 	unsigned int i;
72 
73 	for (i = 0; i < priv->vector_num; i++) {
74 		tqp_vectors = &priv->tqp_vector[i];
75 
76 		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
77 			continue;
78 
79 		/* release the irq resource */
80 		free_irq(tqp_vectors->vector_irq, tqp_vectors);
81 		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
82 	}
83 }
84 
hns3_nic_init_irq(struct hns3_nic_priv * priv)85 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
86 {
87 	struct hns3_enet_tqp_vector *tqp_vectors;
88 	int txrx_int_idx = 0;
89 	int rx_int_idx = 0;
90 	int tx_int_idx = 0;
91 	unsigned int i;
92 	int ret;
93 
94 	for (i = 0; i < priv->vector_num; i++) {
95 		tqp_vectors = &priv->tqp_vector[i];
96 
97 		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
98 			continue;
99 
100 		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
101 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 				 "%s-%s-%d", priv->netdev->name, "TxRx",
103 				 txrx_int_idx++);
104 			txrx_int_idx++;
105 		} else if (tqp_vectors->rx_group.ring) {
106 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
107 				 "%s-%s-%d", priv->netdev->name, "Rx",
108 				 rx_int_idx++);
109 		} else if (tqp_vectors->tx_group.ring) {
110 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
111 				 "%s-%s-%d", priv->netdev->name, "Tx",
112 				 tx_int_idx++);
113 		} else {
114 			/* Skip this unused q_vector */
115 			continue;
116 		}
117 
118 		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
119 
120 		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
121 				  tqp_vectors->name,
122 				       tqp_vectors);
123 		if (ret) {
124 			netdev_err(priv->netdev, "request irq(%d) fail\n",
125 				   tqp_vectors->vector_irq);
126 			return ret;
127 		}
128 
129 		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
130 	}
131 
132 	return 0;
133 }
134 
hns3_mask_vector_irq(struct hns3_enet_tqp_vector * tqp_vector,u32 mask_en)135 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
136 				 u32 mask_en)
137 {
138 	writel(mask_en, tqp_vector->mask_addr);
139 }
140 
hns3_vector_enable(struct hns3_enet_tqp_vector * tqp_vector)141 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
142 {
143 	napi_enable(&tqp_vector->napi);
144 
145 	/* enable vector */
146 	hns3_mask_vector_irq(tqp_vector, 1);
147 }
148 
hns3_vector_disable(struct hns3_enet_tqp_vector * tqp_vector)149 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
150 {
151 	/* disable vector */
152 	hns3_mask_vector_irq(tqp_vector, 0);
153 
154 	disable_irq(tqp_vector->vector_irq);
155 	napi_disable(&tqp_vector->napi);
156 }
157 
hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector * tqp_vector,u32 gl_value)158 static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
159 				       u32 gl_value)
160 {
161 	/* this defines the configuration for GL (Interrupt Gap Limiter)
162 	 * GL defines inter interrupt gap.
163 	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
164 	 */
165 	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
166 	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
167 	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
168 }
169 
hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector * tqp_vector,u32 rl_value)170 static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
171 				       u32 rl_value)
172 {
173 	/* this defines the configuration for RL (Interrupt Rate Limiter).
174 	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
175 	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
176 	 */
177 	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
178 }
179 
hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector * tqp_vector)180 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
181 {
182 	/* initialize the configuration for interrupt coalescing.
183 	 * 1. GL (Interrupt Gap Limiter)
184 	 * 2. RL (Interrupt Rate Limiter)
185 	 */
186 
187 	/* Default :enable interrupt coalesce */
188 	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
189 	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
190 	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
191 	/* for now we are disabling Interrupt RL - we
192 	 * will re-enable later
193 	 */
194 	hns3_set_vector_coalesc_rl(tqp_vector, 0);
195 	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
196 	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
197 }
198 
hns3_nic_net_up(struct net_device * netdev)199 static int hns3_nic_net_up(struct net_device *netdev)
200 {
201 	struct hns3_nic_priv *priv = netdev_priv(netdev);
202 	struct hnae3_handle *h = priv->ae_handle;
203 	int i, j;
204 	int ret;
205 
206 	/* get irq resource for all vectors */
207 	ret = hns3_nic_init_irq(priv);
208 	if (ret) {
209 		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
210 		return ret;
211 	}
212 
213 	/* enable the vectors */
214 	for (i = 0; i < priv->vector_num; i++)
215 		hns3_vector_enable(&priv->tqp_vector[i]);
216 
217 	/* start the ae_dev */
218 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
219 	if (ret)
220 		goto out_start_err;
221 
222 	return 0;
223 
224 out_start_err:
225 	for (j = i - 1; j >= 0; j--)
226 		hns3_vector_disable(&priv->tqp_vector[j]);
227 
228 	hns3_nic_uninit_irq(priv);
229 
230 	return ret;
231 }
232 
hns3_nic_net_open(struct net_device * netdev)233 static int hns3_nic_net_open(struct net_device *netdev)
234 {
235 	struct hns3_nic_priv *priv = netdev_priv(netdev);
236 	struct hnae3_handle *h = priv->ae_handle;
237 	int ret;
238 
239 	netif_carrier_off(netdev);
240 
241 	ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
242 	if (ret) {
243 		netdev_err(netdev,
244 			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
245 			   ret);
246 		return ret;
247 	}
248 
249 	ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
250 	if (ret) {
251 		netdev_err(netdev,
252 			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
253 		return ret;
254 	}
255 
256 	ret = hns3_nic_net_up(netdev);
257 	if (ret) {
258 		netdev_err(netdev,
259 			   "hns net up fail, ret=%d!\n", ret);
260 		return ret;
261 	}
262 
263 	return 0;
264 }
265 
hns3_nic_net_down(struct net_device * netdev)266 static void hns3_nic_net_down(struct net_device *netdev)
267 {
268 	struct hns3_nic_priv *priv = netdev_priv(netdev);
269 	const struct hnae3_ae_ops *ops;
270 	int i;
271 
272 	/* stop ae_dev */
273 	ops = priv->ae_handle->ae_algo->ops;
274 	if (ops->stop)
275 		ops->stop(priv->ae_handle);
276 
277 	/* disable vectors */
278 	for (i = 0; i < priv->vector_num; i++)
279 		hns3_vector_disable(&priv->tqp_vector[i]);
280 
281 	/* free irq resources */
282 	hns3_nic_uninit_irq(priv);
283 }
284 
hns3_nic_net_stop(struct net_device * netdev)285 static int hns3_nic_net_stop(struct net_device *netdev)
286 {
287 	netif_tx_stop_all_queues(netdev);
288 	netif_carrier_off(netdev);
289 
290 	hns3_nic_net_down(netdev);
291 
292 	return 0;
293 }
294 
hns3_set_multicast_list(struct net_device * netdev)295 void hns3_set_multicast_list(struct net_device *netdev)
296 {
297 	struct hns3_nic_priv *priv = netdev_priv(netdev);
298 	struct hnae3_handle *h = priv->ae_handle;
299 	struct netdev_hw_addr *ha = NULL;
300 
301 	if (h->ae_algo->ops->set_mc_addr) {
302 		netdev_for_each_mc_addr(ha, netdev)
303 			if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
304 				netdev_err(netdev, "set multicast fail\n");
305 	}
306 }
307 
hns3_nic_uc_sync(struct net_device * netdev,const unsigned char * addr)308 static int hns3_nic_uc_sync(struct net_device *netdev,
309 			    const unsigned char *addr)
310 {
311 	struct hns3_nic_priv *priv = netdev_priv(netdev);
312 	struct hnae3_handle *h = priv->ae_handle;
313 
314 	if (h->ae_algo->ops->add_uc_addr)
315 		return h->ae_algo->ops->add_uc_addr(h, addr);
316 
317 	return 0;
318 }
319 
hns3_nic_uc_unsync(struct net_device * netdev,const unsigned char * addr)320 static int hns3_nic_uc_unsync(struct net_device *netdev,
321 			      const unsigned char *addr)
322 {
323 	struct hns3_nic_priv *priv = netdev_priv(netdev);
324 	struct hnae3_handle *h = priv->ae_handle;
325 
326 	if (h->ae_algo->ops->rm_uc_addr)
327 		return h->ae_algo->ops->rm_uc_addr(h, addr);
328 
329 	return 0;
330 }
331 
hns3_nic_mc_sync(struct net_device * netdev,const unsigned char * addr)332 static int hns3_nic_mc_sync(struct net_device *netdev,
333 			    const unsigned char *addr)
334 {
335 	struct hns3_nic_priv *priv = netdev_priv(netdev);
336 	struct hnae3_handle *h = priv->ae_handle;
337 
338 	if (h->ae_algo->ops->add_mc_addr)
339 		return h->ae_algo->ops->add_mc_addr(h, addr);
340 
341 	return 0;
342 }
343 
hns3_nic_mc_unsync(struct net_device * netdev,const unsigned char * addr)344 static int hns3_nic_mc_unsync(struct net_device *netdev,
345 			      const unsigned char *addr)
346 {
347 	struct hns3_nic_priv *priv = netdev_priv(netdev);
348 	struct hnae3_handle *h = priv->ae_handle;
349 
350 	if (h->ae_algo->ops->rm_mc_addr)
351 		return h->ae_algo->ops->rm_mc_addr(h, addr);
352 
353 	return 0;
354 }
355 
hns3_nic_set_rx_mode(struct net_device * netdev)356 void hns3_nic_set_rx_mode(struct net_device *netdev)
357 {
358 	struct hns3_nic_priv *priv = netdev_priv(netdev);
359 	struct hnae3_handle *h = priv->ae_handle;
360 
361 	if (h->ae_algo->ops->set_promisc_mode) {
362 		if (netdev->flags & IFF_PROMISC)
363 			h->ae_algo->ops->set_promisc_mode(h, 1);
364 		else
365 			h->ae_algo->ops->set_promisc_mode(h, 0);
366 	}
367 	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
368 		netdev_err(netdev, "sync uc address fail\n");
369 	if (netdev->flags & IFF_MULTICAST)
370 		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
371 			netdev_err(netdev, "sync mc address fail\n");
372 }
373 
hns3_set_tso(struct sk_buff * skb,u32 * paylen,u16 * mss,u32 * type_cs_vlan_tso)374 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
375 			u16 *mss, u32 *type_cs_vlan_tso)
376 {
377 	u32 l4_offset, hdr_len;
378 	union l3_hdr_info l3;
379 	union l4_hdr_info l4;
380 	u32 l4_paylen;
381 	int ret;
382 
383 	if (!skb_is_gso(skb))
384 		return 0;
385 
386 	ret = skb_cow_head(skb, 0);
387 	if (ret)
388 		return ret;
389 
390 	l3.hdr = skb_network_header(skb);
391 	l4.hdr = skb_transport_header(skb);
392 
393 	/* Software should clear the IPv4's checksum field when tso is
394 	 * needed.
395 	 */
396 	if (l3.v4->version == 4)
397 		l3.v4->check = 0;
398 
399 	/* tunnel packet.*/
400 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
401 					 SKB_GSO_GRE_CSUM |
402 					 SKB_GSO_UDP_TUNNEL |
403 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
404 		if ((!(skb_shinfo(skb)->gso_type &
405 		    SKB_GSO_PARTIAL)) &&
406 		    (skb_shinfo(skb)->gso_type &
407 		    SKB_GSO_UDP_TUNNEL_CSUM)) {
408 			/* Software should clear the udp's checksum
409 			 * field when tso is needed.
410 			 */
411 			l4.udp->check = 0;
412 		}
413 		/* reset l3&l4 pointers from outer to inner headers */
414 		l3.hdr = skb_inner_network_header(skb);
415 		l4.hdr = skb_inner_transport_header(skb);
416 
417 		/* Software should clear the IPv4's checksum field when
418 		 * tso is needed.
419 		 */
420 		if (l3.v4->version == 4)
421 			l3.v4->check = 0;
422 	}
423 
424 	/* normal or tunnel packet*/
425 	l4_offset = l4.hdr - skb->data;
426 	hdr_len = (l4.tcp->doff * 4) + l4_offset;
427 
428 	/* remove payload length from inner pseudo checksum when tso*/
429 	l4_paylen = skb->len - l4_offset;
430 	csum_replace_by_diff(&l4.tcp->check,
431 			     (__force __wsum)htonl(l4_paylen));
432 
433 	/* find the txbd field values */
434 	*paylen = skb->len - hdr_len;
435 	hnae_set_bit(*type_cs_vlan_tso,
436 		     HNS3_TXD_TSO_B, 1);
437 
438 	/* get MSS for TSO */
439 	*mss = skb_shinfo(skb)->gso_size;
440 
441 	return 0;
442 }
443 
hns3_get_l4_protocol(struct sk_buff * skb,u8 * ol4_proto,u8 * il4_proto)444 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
445 				u8 *il4_proto)
446 {
447 	union {
448 		struct iphdr *v4;
449 		struct ipv6hdr *v6;
450 		unsigned char *hdr;
451 	} l3;
452 	unsigned char *l4_hdr;
453 	unsigned char *exthdr;
454 	u8 l4_proto_tmp;
455 	__be16 frag_off;
456 
457 	/* find outer header point */
458 	l3.hdr = skb_network_header(skb);
459 	l4_hdr = skb_inner_transport_header(skb);
460 
461 	if (skb->protocol == htons(ETH_P_IPV6)) {
462 		exthdr = l3.hdr + sizeof(*l3.v6);
463 		l4_proto_tmp = l3.v6->nexthdr;
464 		if (l4_hdr != exthdr)
465 			ipv6_skip_exthdr(skb, exthdr - skb->data,
466 					 &l4_proto_tmp, &frag_off);
467 	} else if (skb->protocol == htons(ETH_P_IP)) {
468 		l4_proto_tmp = l3.v4->protocol;
469 	} else {
470 		return -EINVAL;
471 	}
472 
473 	*ol4_proto = l4_proto_tmp;
474 
475 	/* tunnel packet */
476 	if (!skb->encapsulation) {
477 		*il4_proto = 0;
478 		return 0;
479 	}
480 
481 	/* find inner header point */
482 	l3.hdr = skb_inner_network_header(skb);
483 	l4_hdr = skb_inner_transport_header(skb);
484 
485 	if (l3.v6->version == 6) {
486 		exthdr = l3.hdr + sizeof(*l3.v6);
487 		l4_proto_tmp = l3.v6->nexthdr;
488 		if (l4_hdr != exthdr)
489 			ipv6_skip_exthdr(skb, exthdr - skb->data,
490 					 &l4_proto_tmp, &frag_off);
491 	} else if (l3.v4->version == 4) {
492 		l4_proto_tmp = l3.v4->protocol;
493 	}
494 
495 	*il4_proto = l4_proto_tmp;
496 
497 	return 0;
498 }
499 
hns3_set_l2l3l4_len(struct sk_buff * skb,u8 ol4_proto,u8 il4_proto,u32 * type_cs_vlan_tso,u32 * ol_type_vlan_len_msec)500 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
501 				u8 il4_proto, u32 *type_cs_vlan_tso,
502 				u32 *ol_type_vlan_len_msec)
503 {
504 	union {
505 		struct iphdr *v4;
506 		struct ipv6hdr *v6;
507 		unsigned char *hdr;
508 	} l3;
509 	union {
510 		struct tcphdr *tcp;
511 		struct udphdr *udp;
512 		struct gre_base_hdr *gre;
513 		unsigned char *hdr;
514 	} l4;
515 	unsigned char *l2_hdr;
516 	u8 l4_proto = ol4_proto;
517 	u32 ol2_len;
518 	u32 ol3_len;
519 	u32 ol4_len;
520 	u32 l2_len;
521 	u32 l3_len;
522 
523 	l3.hdr = skb_network_header(skb);
524 	l4.hdr = skb_transport_header(skb);
525 
526 	/* compute L2 header size for normal packet, defined in 2 Bytes */
527 	l2_len = l3.hdr - skb->data;
528 	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
529 		       HNS3_TXD_L2LEN_S, l2_len >> 1);
530 
531 	/* tunnel packet*/
532 	if (skb->encapsulation) {
533 		/* compute OL2 header size, defined in 2 Bytes */
534 		ol2_len = l2_len;
535 		hnae_set_field(*ol_type_vlan_len_msec,
536 			       HNS3_TXD_L2LEN_M,
537 			       HNS3_TXD_L2LEN_S, ol2_len >> 1);
538 
539 		/* compute OL3 header size, defined in 4 Bytes */
540 		ol3_len = l4.hdr - l3.hdr;
541 		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
542 			       HNS3_TXD_L3LEN_S, ol3_len >> 2);
543 
544 		/* MAC in UDP, MAC in GRE (0x6558)*/
545 		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
546 			/* switch MAC header ptr from outer to inner header.*/
547 			l2_hdr = skb_inner_mac_header(skb);
548 
549 			/* compute OL4 header size, defined in 4 Bytes. */
550 			ol4_len = l2_hdr - l4.hdr;
551 			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
552 				       HNS3_TXD_L4LEN_S, ol4_len >> 2);
553 
554 			/* switch IP header ptr from outer to inner header */
555 			l3.hdr = skb_inner_network_header(skb);
556 
557 			/* compute inner l2 header size, defined in 2 Bytes. */
558 			l2_len = l3.hdr - l2_hdr;
559 			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
560 				       HNS3_TXD_L2LEN_S, l2_len >> 1);
561 		} else {
562 			/* skb packet types not supported by hardware,
563 			 * txbd len fild doesn't be filled.
564 			 */
565 			return;
566 		}
567 
568 		/* switch L4 header pointer from outer to inner */
569 		l4.hdr = skb_inner_transport_header(skb);
570 
571 		l4_proto = il4_proto;
572 	}
573 
574 	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
575 	l3_len = l4.hdr - l3.hdr;
576 	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
577 		       HNS3_TXD_L3LEN_S, l3_len >> 2);
578 
579 	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
580 	switch (l4_proto) {
581 	case IPPROTO_TCP:
582 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
583 			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
584 		break;
585 	case IPPROTO_SCTP:
586 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
587 			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
588 		break;
589 	case IPPROTO_UDP:
590 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
591 			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
592 		break;
593 	default:
594 		/* skb packet types not supported by hardware,
595 		 * txbd len fild doesn't be filled.
596 		 */
597 		return;
598 	}
599 }
600 
hns3_set_l3l4_type_csum(struct sk_buff * skb,u8 ol4_proto,u8 il4_proto,u32 * type_cs_vlan_tso,u32 * ol_type_vlan_len_msec)601 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
602 				   u8 il4_proto, u32 *type_cs_vlan_tso,
603 				   u32 *ol_type_vlan_len_msec)
604 {
605 	union {
606 		struct iphdr *v4;
607 		struct ipv6hdr *v6;
608 		unsigned char *hdr;
609 	} l3;
610 	u32 l4_proto = ol4_proto;
611 
612 	l3.hdr = skb_network_header(skb);
613 
614 	/* define OL3 type and tunnel type(OL4).*/
615 	if (skb->encapsulation) {
616 		/* define outer network header type.*/
617 		if (skb->protocol == htons(ETH_P_IP)) {
618 			if (skb_is_gso(skb))
619 				hnae_set_field(*ol_type_vlan_len_msec,
620 					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
621 					       HNS3_OL3T_IPV4_CSUM);
622 			else
623 				hnae_set_field(*ol_type_vlan_len_msec,
624 					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
625 					       HNS3_OL3T_IPV4_NO_CSUM);
626 
627 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
628 			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
629 				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
630 		}
631 
632 		/* define tunnel type(OL4).*/
633 		switch (l4_proto) {
634 		case IPPROTO_UDP:
635 			hnae_set_field(*ol_type_vlan_len_msec,
636 				       HNS3_TXD_TUNTYPE_M,
637 				       HNS3_TXD_TUNTYPE_S,
638 				       HNS3_TUN_MAC_IN_UDP);
639 			break;
640 		case IPPROTO_GRE:
641 			hnae_set_field(*ol_type_vlan_len_msec,
642 				       HNS3_TXD_TUNTYPE_M,
643 				       HNS3_TXD_TUNTYPE_S,
644 				       HNS3_TUN_NVGRE);
645 			break;
646 		default:
647 			/* drop the skb tunnel packet if hardware don't support,
648 			 * because hardware can't calculate csum when TSO.
649 			 */
650 			if (skb_is_gso(skb))
651 				return -EDOM;
652 
653 			/* the stack computes the IP header already,
654 			 * driver calculate l4 checksum when not TSO.
655 			 */
656 			skb_checksum_help(skb);
657 			return 0;
658 		}
659 
660 		l3.hdr = skb_inner_network_header(skb);
661 		l4_proto = il4_proto;
662 	}
663 
664 	if (l3.v4->version == 4) {
665 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
666 			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
667 
668 		/* the stack computes the IP header already, the only time we
669 		 * need the hardware to recompute it is in the case of TSO.
670 		 */
671 		if (skb_is_gso(skb))
672 			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
673 
674 		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
675 	} else if (l3.v6->version == 6) {
676 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
677 			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
678 		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
679 	}
680 
681 	switch (l4_proto) {
682 	case IPPROTO_TCP:
683 		hnae_set_field(*type_cs_vlan_tso,
684 			       HNS3_TXD_L4T_M,
685 			       HNS3_TXD_L4T_S,
686 			       HNS3_L4T_TCP);
687 		break;
688 	case IPPROTO_UDP:
689 		hnae_set_field(*type_cs_vlan_tso,
690 			       HNS3_TXD_L4T_M,
691 			       HNS3_TXD_L4T_S,
692 			       HNS3_L4T_UDP);
693 		break;
694 	case IPPROTO_SCTP:
695 		hnae_set_field(*type_cs_vlan_tso,
696 			       HNS3_TXD_L4T_M,
697 			       HNS3_TXD_L4T_S,
698 			       HNS3_L4T_SCTP);
699 		break;
700 	default:
701 		/* drop the skb tunnel packet if hardware don't support,
702 		 * because hardware can't calculate csum when TSO.
703 		 */
704 		if (skb_is_gso(skb))
705 			return -EDOM;
706 
707 		/* the stack computes the IP header already,
708 		 * driver calculate l4 checksum when not TSO.
709 		 */
710 		skb_checksum_help(skb);
711 		return 0;
712 	}
713 
714 	return 0;
715 }
716 
hns3_set_txbd_baseinfo(u16 * bdtp_fe_sc_vld_ra_ri,int frag_end)717 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
718 {
719 	/* Config bd buffer end */
720 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
721 		       HNS3_TXD_BDTYPE_M, 0);
722 	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
723 	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
724 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
725 }
726 
hns3_fill_desc(struct hns3_enet_ring * ring,void * priv,int size,dma_addr_t dma,int frag_end,enum hns_desc_type type)727 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
728 			  int size, dma_addr_t dma, int frag_end,
729 			  enum hns_desc_type type)
730 {
731 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
732 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
733 	u32 ol_type_vlan_len_msec = 0;
734 	u16 bdtp_fe_sc_vld_ra_ri = 0;
735 	u32 type_cs_vlan_tso = 0;
736 	struct sk_buff *skb;
737 	u32 paylen = 0;
738 	u16 mss = 0;
739 	__be16 protocol;
740 	u8 ol4_proto;
741 	u8 il4_proto;
742 	int ret;
743 
744 	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
745 	desc_cb->priv = priv;
746 	desc_cb->length = size;
747 	desc_cb->dma = dma;
748 	desc_cb->type = type;
749 
750 	/* now, fill the descriptor */
751 	desc->addr = cpu_to_le64(dma);
752 	desc->tx.send_size = cpu_to_le16((u16)size);
753 	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
754 	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
755 
756 	if (type == DESC_TYPE_SKB) {
757 		skb = (struct sk_buff *)priv;
758 		paylen = cpu_to_le16(skb->len);
759 
760 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 			skb_reset_mac_len(skb);
762 			protocol = skb->protocol;
763 
764 			/* vlan packet*/
765 			if (protocol == htons(ETH_P_8021Q)) {
766 				protocol = vlan_get_protocol(skb);
767 				skb->protocol = protocol;
768 			}
769 			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
770 			if (ret)
771 				return ret;
772 			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
773 					    &type_cs_vlan_tso,
774 					    &ol_type_vlan_len_msec);
775 			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
776 						      &type_cs_vlan_tso,
777 						      &ol_type_vlan_len_msec);
778 			if (ret)
779 				return ret;
780 
781 			ret = hns3_set_tso(skb, &paylen, &mss,
782 					   &type_cs_vlan_tso);
783 			if (ret)
784 				return ret;
785 		}
786 
787 		/* Set txbd */
788 		desc->tx.ol_type_vlan_len_msec =
789 			cpu_to_le32(ol_type_vlan_len_msec);
790 		desc->tx.type_cs_vlan_tso_len =
791 			cpu_to_le32(type_cs_vlan_tso);
792 		desc->tx.paylen = cpu_to_le16(paylen);
793 		desc->tx.mss = cpu_to_le16(mss);
794 	}
795 
796 	/* move ring pointer to next.*/
797 	ring_ptr_move_fw(ring, next_to_use);
798 
799 	return 0;
800 }
801 
hns3_fill_desc_tso(struct hns3_enet_ring * ring,void * priv,int size,dma_addr_t dma,int frag_end,enum hns_desc_type type)802 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
803 			      int size, dma_addr_t dma, int frag_end,
804 			      enum hns_desc_type type)
805 {
806 	unsigned int frag_buf_num;
807 	unsigned int k;
808 	int sizeoflast;
809 	int ret;
810 
811 	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
812 	sizeoflast = size % HNS3_MAX_BD_SIZE;
813 	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
814 
815 	/* When the frag size is bigger than hardware, split this frag */
816 	for (k = 0; k < frag_buf_num; k++) {
817 		ret = hns3_fill_desc(ring, priv,
818 				     (k == frag_buf_num - 1) ?
819 				sizeoflast : HNS3_MAX_BD_SIZE,
820 				dma + HNS3_MAX_BD_SIZE * k,
821 				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
822 				(type == DESC_TYPE_SKB && !k) ?
823 					DESC_TYPE_SKB : DESC_TYPE_PAGE);
824 		if (ret)
825 			return ret;
826 	}
827 
828 	return 0;
829 }
830 
hns3_nic_maybe_stop_tso(struct sk_buff ** out_skb,int * bnum,struct hns3_enet_ring * ring)831 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
832 				   struct hns3_enet_ring *ring)
833 {
834 	struct sk_buff *skb = *out_skb;
835 	struct skb_frag_struct *frag;
836 	int bdnum_for_frag;
837 	int frag_num;
838 	int buf_num;
839 	int size;
840 	int i;
841 
842 	size = skb_headlen(skb);
843 	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
844 
845 	frag_num = skb_shinfo(skb)->nr_frags;
846 	for (i = 0; i < frag_num; i++) {
847 		frag = &skb_shinfo(skb)->frags[i];
848 		size = skb_frag_size(frag);
849 		bdnum_for_frag =
850 			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
851 		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
852 			return -ENOMEM;
853 
854 		buf_num += bdnum_for_frag;
855 	}
856 
857 	if (buf_num > ring_space(ring))
858 		return -EBUSY;
859 
860 	*bnum = buf_num;
861 	return 0;
862 }
863 
hns3_nic_maybe_stop_tx(struct sk_buff ** out_skb,int * bnum,struct hns3_enet_ring * ring)864 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
865 				  struct hns3_enet_ring *ring)
866 {
867 	struct sk_buff *skb = *out_skb;
868 	int buf_num;
869 
870 	/* No. of segments (plus a header) */
871 	buf_num = skb_shinfo(skb)->nr_frags + 1;
872 
873 	if (buf_num > ring_space(ring))
874 		return -EBUSY;
875 
876 	*bnum = buf_num;
877 
878 	return 0;
879 }
880 
hns_nic_dma_unmap(struct hns3_enet_ring * ring,int next_to_use_orig)881 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
882 {
883 	struct device *dev = ring_to_dev(ring);
884 	unsigned int i;
885 
886 	for (i = 0; i < ring->desc_num; i++) {
887 		/* check if this is where we started */
888 		if (ring->next_to_use == next_to_use_orig)
889 			break;
890 
891 		/* unmap the descriptor dma address */
892 		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
893 			dma_unmap_single(dev,
894 					 ring->desc_cb[ring->next_to_use].dma,
895 					ring->desc_cb[ring->next_to_use].length,
896 					DMA_TO_DEVICE);
897 		else
898 			dma_unmap_page(dev,
899 				       ring->desc_cb[ring->next_to_use].dma,
900 				       ring->desc_cb[ring->next_to_use].length,
901 				       DMA_TO_DEVICE);
902 
903 		/* rollback one */
904 		ring_ptr_move_bw(ring, next_to_use);
905 	}
906 }
907 
hns3_nic_net_xmit(struct sk_buff * skb,struct net_device * netdev)908 static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
909 				     struct net_device *netdev)
910 {
911 	struct hns3_nic_priv *priv = netdev_priv(netdev);
912 	struct hns3_nic_ring_data *ring_data =
913 		&tx_ring_data(priv, skb->queue_mapping);
914 	struct hns3_enet_ring *ring = ring_data->ring;
915 	struct device *dev = priv->dev;
916 	struct netdev_queue *dev_queue;
917 	struct skb_frag_struct *frag;
918 	int next_to_use_head;
919 	int next_to_use_frag;
920 	dma_addr_t dma;
921 	int buf_num;
922 	int seg_num;
923 	int size;
924 	int ret;
925 	int i;
926 
927 	/* Prefetch the data used later */
928 	prefetch(skb->data);
929 
930 	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
931 	case -EBUSY:
932 		u64_stats_update_begin(&ring->syncp);
933 		ring->stats.tx_busy++;
934 		u64_stats_update_end(&ring->syncp);
935 
936 		goto out_net_tx_busy;
937 	case -ENOMEM:
938 		u64_stats_update_begin(&ring->syncp);
939 		ring->stats.sw_err_cnt++;
940 		u64_stats_update_end(&ring->syncp);
941 		netdev_err(netdev, "no memory to xmit!\n");
942 
943 		goto out_err_tx_ok;
944 	default:
945 		break;
946 	}
947 
948 	/* No. of segments (plus a header) */
949 	seg_num = skb_shinfo(skb)->nr_frags + 1;
950 	/* Fill the first part */
951 	size = skb_headlen(skb);
952 
953 	next_to_use_head = ring->next_to_use;
954 
955 	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
956 	if (dma_mapping_error(dev, dma)) {
957 		netdev_err(netdev, "TX head DMA map failed\n");
958 		ring->stats.sw_err_cnt++;
959 		goto out_err_tx_ok;
960 	}
961 
962 	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
963 			   DESC_TYPE_SKB);
964 	if (ret)
965 		goto head_dma_map_err;
966 
967 	next_to_use_frag = ring->next_to_use;
968 	/* Fill the fragments */
969 	for (i = 1; i < seg_num; i++) {
970 		frag = &skb_shinfo(skb)->frags[i - 1];
971 		size = skb_frag_size(frag);
972 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
973 		if (dma_mapping_error(dev, dma)) {
974 			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
975 			ring->stats.sw_err_cnt++;
976 			goto frag_dma_map_err;
977 		}
978 		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
979 				    seg_num - 1 == i ? 1 : 0,
980 				    DESC_TYPE_PAGE);
981 
982 		if (ret)
983 			goto frag_dma_map_err;
984 	}
985 
986 	/* Complete translate all packets */
987 	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
988 	netdev_tx_sent_queue(dev_queue, skb->len);
989 
990 	wmb(); /* Commit all data before submit */
991 
992 	hnae_queue_xmit(ring->tqp, buf_num);
993 
994 	return NETDEV_TX_OK;
995 
996 frag_dma_map_err:
997 	hns_nic_dma_unmap(ring, next_to_use_frag);
998 
999 head_dma_map_err:
1000 	hns_nic_dma_unmap(ring, next_to_use_head);
1001 
1002 out_err_tx_ok:
1003 	dev_kfree_skb_any(skb);
1004 	return NETDEV_TX_OK;
1005 
1006 out_net_tx_busy:
1007 	netif_stop_subqueue(netdev, ring_data->queue_index);
1008 	smp_mb(); /* Commit all data before submit */
1009 
1010 	return NETDEV_TX_BUSY;
1011 }
1012 
hns3_nic_net_set_mac_address(struct net_device * netdev,void * p)1013 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1014 {
1015 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1016 	struct hnae3_handle *h = priv->ae_handle;
1017 	struct sockaddr *mac_addr = p;
1018 	int ret;
1019 
1020 	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1021 		return -EADDRNOTAVAIL;
1022 
1023 	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1024 	if (ret) {
1025 		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1026 		return ret;
1027 	}
1028 
1029 	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1030 
1031 	return 0;
1032 }
1033 
hns3_nic_set_features(struct net_device * netdev,netdev_features_t features)1034 static int hns3_nic_set_features(struct net_device *netdev,
1035 				 netdev_features_t features)
1036 {
1037 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1038 
1039 	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1040 		priv->ops.fill_desc = hns3_fill_desc_tso;
1041 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1042 	} else {
1043 		priv->ops.fill_desc = hns3_fill_desc;
1044 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1045 	}
1046 
1047 	netdev->features = features;
1048 	return 0;
1049 }
1050 
1051 static void
hns3_nic_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)1052 hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1053 {
1054 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1055 	int queue_num = priv->ae_handle->kinfo.num_tqps;
1056 	struct hns3_enet_ring *ring;
1057 	unsigned int start;
1058 	unsigned int idx;
1059 	u64 tx_bytes = 0;
1060 	u64 rx_bytes = 0;
1061 	u64 tx_pkts = 0;
1062 	u64 rx_pkts = 0;
1063 	u64 tx_drop = 0;
1064 	u64 rx_drop = 0;
1065 
1066 	for (idx = 0; idx < queue_num; idx++) {
1067 		/* fetch the tx stats */
1068 		ring = priv->ring_data[idx].ring;
1069 		do {
1070 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1071 			tx_bytes += ring->stats.tx_bytes;
1072 			tx_pkts += ring->stats.tx_pkts;
1073 			tx_drop += ring->stats.tx_busy;
1074 			tx_drop += ring->stats.sw_err_cnt;
1075 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1076 
1077 		/* fetch the rx stats */
1078 		ring = priv->ring_data[idx + queue_num].ring;
1079 		do {
1080 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1081 			rx_bytes += ring->stats.rx_bytes;
1082 			rx_pkts += ring->stats.rx_pkts;
1083 			rx_drop += ring->stats.non_vld_descs;
1084 			rx_drop += ring->stats.err_pkt_len;
1085 			rx_drop += ring->stats.l2_err;
1086 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1087 	}
1088 
1089 	stats->tx_bytes = tx_bytes;
1090 	stats->tx_packets = tx_pkts;
1091 	stats->rx_bytes = rx_bytes;
1092 	stats->rx_packets = rx_pkts;
1093 
1094 	stats->rx_errors = netdev->stats.rx_errors;
1095 	stats->multicast = netdev->stats.multicast;
1096 	stats->rx_length_errors = netdev->stats.rx_length_errors;
1097 	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1098 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1099 
1100 	stats->tx_errors = netdev->stats.tx_errors;
1101 	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1102 	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1103 	stats->collisions = netdev->stats.collisions;
1104 	stats->rx_over_errors = netdev->stats.rx_over_errors;
1105 	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1106 	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1107 	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1108 	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1109 	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1110 	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1111 	stats->tx_window_errors = netdev->stats.tx_window_errors;
1112 	stats->rx_compressed = netdev->stats.rx_compressed;
1113 	stats->tx_compressed = netdev->stats.tx_compressed;
1114 }
1115 
hns3_add_tunnel_port(struct net_device * netdev,u16 port,enum hns3_udp_tnl_type type)1116 static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1117 				 enum hns3_udp_tnl_type type)
1118 {
1119 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1120 	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1121 	struct hnae3_handle *h = priv->ae_handle;
1122 
1123 	if (udp_tnl->used && udp_tnl->dst_port == port) {
1124 		udp_tnl->used++;
1125 		return;
1126 	}
1127 
1128 	if (udp_tnl->used) {
1129 		netdev_warn(netdev,
1130 			    "UDP tunnel [%d], port [%d] offload\n", type, port);
1131 		return;
1132 	}
1133 
1134 	udp_tnl->dst_port = port;
1135 	udp_tnl->used = 1;
1136 	/* TBD send command to hardware to add port */
1137 	if (h->ae_algo->ops->add_tunnel_udp)
1138 		h->ae_algo->ops->add_tunnel_udp(h, port);
1139 }
1140 
hns3_del_tunnel_port(struct net_device * netdev,u16 port,enum hns3_udp_tnl_type type)1141 static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1142 				 enum hns3_udp_tnl_type type)
1143 {
1144 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1145 	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1146 	struct hnae3_handle *h = priv->ae_handle;
1147 
1148 	if (!udp_tnl->used || udp_tnl->dst_port != port) {
1149 		netdev_warn(netdev,
1150 			    "Invalid UDP tunnel port %d\n", port);
1151 		return;
1152 	}
1153 
1154 	udp_tnl->used--;
1155 	if (udp_tnl->used)
1156 		return;
1157 
1158 	udp_tnl->dst_port = 0;
1159 	/* TBD send command to hardware to del port  */
1160 	if (h->ae_algo->ops->del_tunnel_udp)
1161 		h->ae_algo->ops->del_tunnel_udp(h, port);
1162 }
1163 
1164 /* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
1165  * @netdev: This physical ports's netdev
1166  * @ti: Tunnel information
1167  */
hns3_nic_udp_tunnel_add(struct net_device * netdev,struct udp_tunnel_info * ti)1168 static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1169 				    struct udp_tunnel_info *ti)
1170 {
1171 	u16 port_n = ntohs(ti->port);
1172 
1173 	switch (ti->type) {
1174 	case UDP_TUNNEL_TYPE_VXLAN:
1175 		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1176 		break;
1177 	case UDP_TUNNEL_TYPE_GENEVE:
1178 		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1179 		break;
1180 	default:
1181 		netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1182 		break;
1183 	}
1184 }
1185 
hns3_nic_udp_tunnel_del(struct net_device * netdev,struct udp_tunnel_info * ti)1186 static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1187 				    struct udp_tunnel_info *ti)
1188 {
1189 	u16 port_n = ntohs(ti->port);
1190 
1191 	switch (ti->type) {
1192 	case UDP_TUNNEL_TYPE_VXLAN:
1193 		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1194 		break;
1195 	case UDP_TUNNEL_TYPE_GENEVE:
1196 		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1197 		break;
1198 	default:
1199 		break;
1200 	}
1201 }
1202 
hns3_setup_tc(struct net_device * netdev,u8 tc)1203 static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1204 {
1205 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1206 	struct hnae3_handle *h = priv->ae_handle;
1207 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
1208 	unsigned int i;
1209 	int ret;
1210 
1211 	if (tc > HNAE3_MAX_TC)
1212 		return -EINVAL;
1213 
1214 	if (kinfo->num_tc == tc)
1215 		return 0;
1216 
1217 	if (!netdev)
1218 		return -EINVAL;
1219 
1220 	if (!tc) {
1221 		netdev_reset_tc(netdev);
1222 		return 0;
1223 	}
1224 
1225 	/* Set num_tc for netdev */
1226 	ret = netdev_set_num_tc(netdev, tc);
1227 	if (ret)
1228 		return ret;
1229 
1230 	/* Set per TC queues for the VSI */
1231 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1232 		if (kinfo->tc_info[i].enable)
1233 			netdev_set_tc_queue(netdev,
1234 					    kinfo->tc_info[i].tc,
1235 					    kinfo->tc_info[i].tqp_count,
1236 					    kinfo->tc_info[i].tqp_offset);
1237 	}
1238 
1239 	return 0;
1240 }
1241 
hns3_nic_setup_tc(struct net_device * dev,enum tc_setup_type type,void * type_data)1242 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1243 			     void *type_data)
1244 {
1245 	struct tc_mqprio_qopt *mqprio = type_data;
1246 
1247 	if (type != TC_SETUP_MQPRIO)
1248 		return -EOPNOTSUPP;
1249 
1250 	return hns3_setup_tc(dev, mqprio->num_tc);
1251 }
1252 
hns3_vlan_rx_add_vid(struct net_device * netdev,__be16 proto,u16 vid)1253 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1254 				__be16 proto, u16 vid)
1255 {
1256 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1257 	struct hnae3_handle *h = priv->ae_handle;
1258 	int ret = -EIO;
1259 
1260 	if (h->ae_algo->ops->set_vlan_filter)
1261 		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1262 
1263 	return ret;
1264 }
1265 
hns3_vlan_rx_kill_vid(struct net_device * netdev,__be16 proto,u16 vid)1266 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1267 				 __be16 proto, u16 vid)
1268 {
1269 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1270 	struct hnae3_handle *h = priv->ae_handle;
1271 	int ret = -EIO;
1272 
1273 	if (h->ae_algo->ops->set_vlan_filter)
1274 		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1275 
1276 	return ret;
1277 }
1278 
hns3_ndo_set_vf_vlan(struct net_device * netdev,int vf,u16 vlan,u8 qos,__be16 vlan_proto)1279 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1280 				u8 qos, __be16 vlan_proto)
1281 {
1282 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1283 	struct hnae3_handle *h = priv->ae_handle;
1284 	int ret = -EIO;
1285 
1286 	if (h->ae_algo->ops->set_vf_vlan_filter)
1287 		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1288 						   qos, vlan_proto);
1289 
1290 	return ret;
1291 }
1292 
hns3_nic_change_mtu(struct net_device * netdev,int new_mtu)1293 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1294 {
1295 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1296 	struct hnae3_handle *h = priv->ae_handle;
1297 	bool if_running = netif_running(netdev);
1298 	int ret;
1299 
1300 	if (!h->ae_algo->ops->set_mtu)
1301 		return -EOPNOTSUPP;
1302 
1303 	/* if this was called with netdev up then bring netdevice down */
1304 	if (if_running) {
1305 		(void)hns3_nic_net_stop(netdev);
1306 		msleep(100);
1307 	}
1308 
1309 	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1310 	if (ret)
1311 		netdev_err(netdev, "failed to change MTU in hardware %d\n",
1312 			   ret);
1313 	else
1314 		netdev->mtu = new_mtu;
1315 
1316 	/* if the netdev was running earlier, bring it up again */
1317 	if (if_running && hns3_nic_net_open(netdev))
1318 		ret = -EINVAL;
1319 
1320 	return ret;
1321 }
1322 
1323 static const struct net_device_ops hns3_nic_netdev_ops = {
1324 	.ndo_open		= hns3_nic_net_open,
1325 	.ndo_stop		= hns3_nic_net_stop,
1326 	.ndo_start_xmit		= hns3_nic_net_xmit,
1327 	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
1328 	.ndo_change_mtu		= hns3_nic_change_mtu,
1329 	.ndo_set_features	= hns3_nic_set_features,
1330 	.ndo_get_stats64	= hns3_nic_get_stats64,
1331 	.ndo_setup_tc		= hns3_nic_setup_tc,
1332 	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
1333 	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
1334 	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
1335 	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
1336 	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
1337 	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
1338 };
1339 
1340 /* hns3_probe - Device initialization routine
1341  * @pdev: PCI device information struct
1342  * @ent: entry in hns3_pci_tbl
1343  *
1344  * hns3_probe initializes a PF identified by a pci_dev structure.
1345  * The OS initialization, configuring of the PF private structure,
1346  * and a hardware reset occur.
1347  *
1348  * Returns 0 on success, negative on failure
1349  */
hns3_probe(struct pci_dev * pdev,const struct pci_device_id * ent)1350 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1351 {
1352 	struct hnae3_ae_dev *ae_dev;
1353 	int ret;
1354 
1355 	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1356 			      GFP_KERNEL);
1357 	if (!ae_dev) {
1358 		ret = -ENOMEM;
1359 		return ret;
1360 	}
1361 
1362 	ae_dev->pdev = pdev;
1363 	ae_dev->flag = ent->driver_data;
1364 	ae_dev->dev_type = HNAE3_DEV_KNIC;
1365 	pci_set_drvdata(pdev, ae_dev);
1366 
1367 	return hnae3_register_ae_dev(ae_dev);
1368 }
1369 
1370 /* hns3_remove - Device removal routine
1371  * @pdev: PCI device information struct
1372  */
hns3_remove(struct pci_dev * pdev)1373 static void hns3_remove(struct pci_dev *pdev)
1374 {
1375 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1376 
1377 	hnae3_unregister_ae_dev(ae_dev);
1378 
1379 	devm_kfree(&pdev->dev, ae_dev);
1380 
1381 	pci_set_drvdata(pdev, NULL);
1382 }
1383 
1384 static struct pci_driver hns3_driver = {
1385 	.name     = hns3_driver_name,
1386 	.id_table = hns3_pci_tbl,
1387 	.probe    = hns3_probe,
1388 	.remove   = hns3_remove,
1389 };
1390 
1391 /* set default feature to hns3 */
hns3_set_default_feature(struct net_device * netdev)1392 static void hns3_set_default_feature(struct net_device *netdev)
1393 {
1394 	netdev->priv_flags |= IFF_UNICAST_FLT;
1395 
1396 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1397 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1398 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1399 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1400 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1401 
1402 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1403 
1404 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1405 
1406 	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1407 		NETIF_F_HW_VLAN_CTAG_FILTER |
1408 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1409 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1410 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1411 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1412 
1413 	netdev->vlan_features |=
1414 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1415 		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1416 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1417 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1418 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1419 
1420 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1421 		NETIF_F_HW_VLAN_CTAG_FILTER |
1422 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1423 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1424 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1425 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1426 }
1427 
hns3_alloc_buffer(struct hns3_enet_ring * ring,struct hns3_desc_cb * cb)1428 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1429 			     struct hns3_desc_cb *cb)
1430 {
1431 	unsigned int order = hnae_page_order(ring);
1432 	struct page *p;
1433 
1434 	p = dev_alloc_pages(order);
1435 	if (!p)
1436 		return -ENOMEM;
1437 
1438 	cb->priv = p;
1439 	cb->page_offset = 0;
1440 	cb->reuse_flag = 0;
1441 	cb->buf  = page_address(p);
1442 	cb->length = hnae_page_size(ring);
1443 	cb->type = DESC_TYPE_PAGE;
1444 
1445 	memset(cb->buf, 0, cb->length);
1446 
1447 	return 0;
1448 }
1449 
hns3_free_buffer(struct hns3_enet_ring * ring,struct hns3_desc_cb * cb)1450 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1451 			     struct hns3_desc_cb *cb)
1452 {
1453 	if (cb->type == DESC_TYPE_SKB)
1454 		dev_kfree_skb_any((struct sk_buff *)cb->priv);
1455 	else if (!HNAE3_IS_TX_RING(ring))
1456 		put_page((struct page *)cb->priv);
1457 	memset(cb, 0, sizeof(*cb));
1458 }
1459 
hns3_map_buffer(struct hns3_enet_ring * ring,struct hns3_desc_cb * cb)1460 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1461 {
1462 	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1463 			       cb->length, ring_to_dma_dir(ring));
1464 
1465 	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1466 		return -EIO;
1467 
1468 	return 0;
1469 }
1470 
hns3_unmap_buffer(struct hns3_enet_ring * ring,struct hns3_desc_cb * cb)1471 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1472 			      struct hns3_desc_cb *cb)
1473 {
1474 	if (cb->type == DESC_TYPE_SKB)
1475 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1476 				 ring_to_dma_dir(ring));
1477 	else
1478 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1479 			       ring_to_dma_dir(ring));
1480 }
1481 
hns3_buffer_detach(struct hns3_enet_ring * ring,int i)1482 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1483 {
1484 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1485 	ring->desc[i].addr = 0;
1486 }
1487 
hns3_free_buffer_detach(struct hns3_enet_ring * ring,int i)1488 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1489 {
1490 	struct hns3_desc_cb *cb = &ring->desc_cb[i];
1491 
1492 	if (!ring->desc_cb[i].dma)
1493 		return;
1494 
1495 	hns3_buffer_detach(ring, i);
1496 	hns3_free_buffer(ring, cb);
1497 }
1498 
hns3_free_buffers(struct hns3_enet_ring * ring)1499 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1500 {
1501 	int i;
1502 
1503 	for (i = 0; i < ring->desc_num; i++)
1504 		hns3_free_buffer_detach(ring, i);
1505 }
1506 
1507 /* free desc along with its attached buffer */
hns3_free_desc(struct hns3_enet_ring * ring)1508 static void hns3_free_desc(struct hns3_enet_ring *ring)
1509 {
1510 	hns3_free_buffers(ring);
1511 
1512 	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1513 			 ring->desc_num * sizeof(ring->desc[0]),
1514 			 DMA_BIDIRECTIONAL);
1515 	ring->desc_dma_addr = 0;
1516 	kfree(ring->desc);
1517 	ring->desc = NULL;
1518 }
1519 
hns3_alloc_desc(struct hns3_enet_ring * ring)1520 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1521 {
1522 	int size = ring->desc_num * sizeof(ring->desc[0]);
1523 
1524 	ring->desc = kzalloc(size, GFP_KERNEL);
1525 	if (!ring->desc)
1526 		return -ENOMEM;
1527 
1528 	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1529 					     size, DMA_BIDIRECTIONAL);
1530 	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1531 		ring->desc_dma_addr = 0;
1532 		kfree(ring->desc);
1533 		ring->desc = NULL;
1534 		return -ENOMEM;
1535 	}
1536 
1537 	return 0;
1538 }
1539 
hns3_reserve_buffer_map(struct hns3_enet_ring * ring,struct hns3_desc_cb * cb)1540 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1541 				   struct hns3_desc_cb *cb)
1542 {
1543 	int ret;
1544 
1545 	ret = hns3_alloc_buffer(ring, cb);
1546 	if (ret)
1547 		goto out;
1548 
1549 	ret = hns3_map_buffer(ring, cb);
1550 	if (ret)
1551 		goto out_with_buf;
1552 
1553 	return 0;
1554 
1555 out_with_buf:
1556 	hns3_free_buffer(ring, cb);
1557 out:
1558 	return ret;
1559 }
1560 
hns3_alloc_buffer_attach(struct hns3_enet_ring * ring,int i)1561 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1562 {
1563 	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1564 
1565 	if (ret)
1566 		return ret;
1567 
1568 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1569 
1570 	return 0;
1571 }
1572 
1573 /* Allocate memory for raw pkg, and map with dma */
hns3_alloc_ring_buffers(struct hns3_enet_ring * ring)1574 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1575 {
1576 	int i, j, ret;
1577 
1578 	for (i = 0; i < ring->desc_num; i++) {
1579 		ret = hns3_alloc_buffer_attach(ring, i);
1580 		if (ret)
1581 			goto out_buffer_fail;
1582 	}
1583 
1584 	return 0;
1585 
1586 out_buffer_fail:
1587 	for (j = i - 1; j >= 0; j--)
1588 		hns3_free_buffer_detach(ring, j);
1589 	return ret;
1590 }
1591 
1592 /* detach a in-used buffer and replace with a reserved one  */
hns3_replace_buffer(struct hns3_enet_ring * ring,int i,struct hns3_desc_cb * res_cb)1593 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1594 				struct hns3_desc_cb *res_cb)
1595 {
1596 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1597 	ring->desc_cb[i] = *res_cb;
1598 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1599 	ring->desc[i].rx.bd_base_info = 0;
1600 }
1601 
hns3_reuse_buffer(struct hns3_enet_ring * ring,int i)1602 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1603 {
1604 	ring->desc_cb[i].reuse_flag = 0;
1605 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1606 		+ ring->desc_cb[i].page_offset);
1607 	ring->desc[i].rx.bd_base_info = 0;
1608 }
1609 
hns3_nic_reclaim_one_desc(struct hns3_enet_ring * ring,int * bytes,int * pkts)1610 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1611 				      int *pkts)
1612 {
1613 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1614 
1615 	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1616 	(*bytes) += desc_cb->length;
1617 	/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
1618 	hns3_free_buffer_detach(ring, ring->next_to_clean);
1619 
1620 	ring_ptr_move_fw(ring, next_to_clean);
1621 }
1622 
is_valid_clean_head(struct hns3_enet_ring * ring,int h)1623 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1624 {
1625 	int u = ring->next_to_use;
1626 	int c = ring->next_to_clean;
1627 
1628 	if (unlikely(h > ring->desc_num))
1629 		return 0;
1630 
1631 	return u > c ? (h > c && h <= u) : (h > c || h <= u);
1632 }
1633 
hns3_clean_tx_ring(struct hns3_enet_ring * ring,int budget)1634 int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1635 {
1636 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1637 	struct netdev_queue *dev_queue;
1638 	int bytes, pkts;
1639 	int head;
1640 
1641 	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1642 	rmb(); /* Make sure head is ready before touch any data */
1643 
1644 	if (is_ring_empty(ring) || head == ring->next_to_clean)
1645 		return 0; /* no data to poll */
1646 
1647 	if (!is_valid_clean_head(ring, head)) {
1648 		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1649 			   ring->next_to_use, ring->next_to_clean);
1650 
1651 		u64_stats_update_begin(&ring->syncp);
1652 		ring->stats.io_err_cnt++;
1653 		u64_stats_update_end(&ring->syncp);
1654 		return -EIO;
1655 	}
1656 
1657 	bytes = 0;
1658 	pkts = 0;
1659 	while (head != ring->next_to_clean && budget) {
1660 		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1661 		/* Issue prefetch for next Tx descriptor */
1662 		prefetch(&ring->desc_cb[ring->next_to_clean]);
1663 		budget--;
1664 	}
1665 
1666 	ring->tqp_vector->tx_group.total_bytes += bytes;
1667 	ring->tqp_vector->tx_group.total_packets += pkts;
1668 
1669 	u64_stats_update_begin(&ring->syncp);
1670 	ring->stats.tx_bytes += bytes;
1671 	ring->stats.tx_pkts += pkts;
1672 	u64_stats_update_end(&ring->syncp);
1673 
1674 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1675 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
1676 
1677 	if (unlikely(pkts && netif_carrier_ok(netdev) &&
1678 		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1679 		/* Make sure that anybody stopping the queue after this
1680 		 * sees the new next_to_clean.
1681 		 */
1682 		smp_mb();
1683 		if (netif_tx_queue_stopped(dev_queue)) {
1684 			netif_tx_wake_queue(dev_queue);
1685 			ring->stats.restart_queue++;
1686 		}
1687 	}
1688 
1689 	return !!budget;
1690 }
1691 
hns3_desc_unused(struct hns3_enet_ring * ring)1692 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1693 {
1694 	int ntc = ring->next_to_clean;
1695 	int ntu = ring->next_to_use;
1696 
1697 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1698 }
1699 
1700 static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring * ring,int cleand_count)1701 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1702 {
1703 	struct hns3_desc_cb *desc_cb;
1704 	struct hns3_desc_cb res_cbs;
1705 	int i, ret;
1706 
1707 	for (i = 0; i < cleand_count; i++) {
1708 		desc_cb = &ring->desc_cb[ring->next_to_use];
1709 		if (desc_cb->reuse_flag) {
1710 			u64_stats_update_begin(&ring->syncp);
1711 			ring->stats.reuse_pg_cnt++;
1712 			u64_stats_update_end(&ring->syncp);
1713 
1714 			hns3_reuse_buffer(ring, ring->next_to_use);
1715 		} else {
1716 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
1717 			if (ret) {
1718 				u64_stats_update_begin(&ring->syncp);
1719 				ring->stats.sw_err_cnt++;
1720 				u64_stats_update_end(&ring->syncp);
1721 
1722 				netdev_err(ring->tqp->handle->kinfo.netdev,
1723 					   "hnae reserve buffer map failed.\n");
1724 				break;
1725 			}
1726 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1727 		}
1728 
1729 		ring_ptr_move_fw(ring, next_to_use);
1730 	}
1731 
1732 	wmb(); /* Make all data has been write before submit */
1733 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1734 }
1735 
1736 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1737  * @data: pointer to the start of the headers
1738  * @max: total length of section to find headers in
1739  *
1740  * This function is meant to determine the length of headers that will
1741  * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
1742  * motivation of doing this is to only perform one pull for IPv4 TCP
1743  * packets so that we can do basic things like calculating the gso_size
1744  * based on the average data per packet.
1745  */
hns3_nic_get_headlen(unsigned char * data,u32 flag,unsigned int max_size)1746 static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1747 					 unsigned int max_size)
1748 {
1749 	unsigned char *network;
1750 	u8 hlen;
1751 
1752 	/* This should never happen, but better safe than sorry */
1753 	if (max_size < ETH_HLEN)
1754 		return max_size;
1755 
1756 	/* Initialize network frame pointer */
1757 	network = data;
1758 
1759 	/* Set first protocol and move network header forward */
1760 	network += ETH_HLEN;
1761 
1762 	/* Handle any vlan tag if present */
1763 	if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1764 		== HNS3_RX_FLAG_VLAN_PRESENT) {
1765 		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1766 			return max_size;
1767 
1768 		network += VLAN_HLEN;
1769 	}
1770 
1771 	/* Handle L3 protocols */
1772 	if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1773 		== HNS3_RX_FLAG_L3ID_IPV4) {
1774 		if ((typeof(max_size))(network - data) >
1775 		    (max_size - sizeof(struct iphdr)))
1776 			return max_size;
1777 
1778 		/* Access ihl as a u8 to avoid unaligned access on ia64 */
1779 		hlen = (network[0] & 0x0F) << 2;
1780 
1781 		/* Verify hlen meets minimum size requirements */
1782 		if (hlen < sizeof(struct iphdr))
1783 			return network - data;
1784 
1785 		/* Record next protocol if header is present */
1786 	} else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1787 		== HNS3_RX_FLAG_L3ID_IPV6) {
1788 		if ((typeof(max_size))(network - data) >
1789 		    (max_size - sizeof(struct ipv6hdr)))
1790 			return max_size;
1791 
1792 		/* Record next protocol */
1793 		hlen = sizeof(struct ipv6hdr);
1794 	} else {
1795 		return network - data;
1796 	}
1797 
1798 	/* Relocate pointer to start of L4 header */
1799 	network += hlen;
1800 
1801 	/* Finally sort out TCP/UDP */
1802 	if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1803 		== HNS3_RX_FLAG_L4ID_TCP) {
1804 		if ((typeof(max_size))(network - data) >
1805 		    (max_size - sizeof(struct tcphdr)))
1806 			return max_size;
1807 
1808 		/* Access doff as a u8 to avoid unaligned access on ia64 */
1809 		hlen = (network[12] & 0xF0) >> 2;
1810 
1811 		/* Verify hlen meets minimum size requirements */
1812 		if (hlen < sizeof(struct tcphdr))
1813 			return network - data;
1814 
1815 		network += hlen;
1816 	} else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1817 		== HNS3_RX_FLAG_L4ID_UDP) {
1818 		if ((typeof(max_size))(network - data) >
1819 		    (max_size - sizeof(struct udphdr)))
1820 			return max_size;
1821 
1822 		network += sizeof(struct udphdr);
1823 	}
1824 
1825 	/* If everything has gone correctly network should be the
1826 	 * data section of the packet and will be the end of the header.
1827 	 * If not then it probably represents the end of the last recognized
1828 	 * header.
1829 	 */
1830 	if ((typeof(max_size))(network - data) < max_size)
1831 		return network - data;
1832 	else
1833 		return max_size;
1834 }
1835 
hns3_nic_reuse_page(struct sk_buff * skb,int i,struct hns3_enet_ring * ring,int pull_len,struct hns3_desc_cb * desc_cb)1836 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1837 				struct hns3_enet_ring *ring, int pull_len,
1838 				struct hns3_desc_cb *desc_cb)
1839 {
1840 	struct hns3_desc *desc;
1841 	int truesize, size;
1842 	int last_offset;
1843 	bool twobufs;
1844 
1845 	twobufs = ((PAGE_SIZE < 8192) &&
1846 		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1847 
1848 	desc = &ring->desc[ring->next_to_clean];
1849 	size = le16_to_cpu(desc->rx.size);
1850 
1851 	if (twobufs) {
1852 		truesize = hnae_buf_size(ring);
1853 	} else {
1854 		truesize = ALIGN(size, L1_CACHE_BYTES);
1855 		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1856 	}
1857 
1858 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1859 			size - pull_len, truesize - pull_len);
1860 
1861 	 /* Avoid re-using remote pages,flag default unreuse */
1862 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1863 		return;
1864 
1865 	if (twobufs) {
1866 		/* If we are only owner of page we can reuse it */
1867 		if (likely(page_count(desc_cb->priv) == 1)) {
1868 			/* Flip page offset to other buffer */
1869 			desc_cb->page_offset ^= truesize;
1870 
1871 			desc_cb->reuse_flag = 1;
1872 			/* bump ref count on page before it is given*/
1873 			get_page(desc_cb->priv);
1874 		}
1875 		return;
1876 	}
1877 
1878 	/* Move offset up to the next cache line */
1879 	desc_cb->page_offset += truesize;
1880 
1881 	if (desc_cb->page_offset <= last_offset) {
1882 		desc_cb->reuse_flag = 1;
1883 		/* Bump ref count on page before it is given*/
1884 		get_page(desc_cb->priv);
1885 	}
1886 }
1887 
hns3_rx_checksum(struct hns3_enet_ring * ring,struct sk_buff * skb,struct hns3_desc * desc)1888 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1889 			     struct hns3_desc *desc)
1890 {
1891 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1892 	int l3_type, l4_type;
1893 	u32 bd_base_info;
1894 	int ol4_type;
1895 	u32 l234info;
1896 
1897 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1898 	l234info = le32_to_cpu(desc->rx.l234_info);
1899 
1900 	skb->ip_summed = CHECKSUM_NONE;
1901 
1902 	skb_checksum_none_assert(skb);
1903 
1904 	if (!(netdev->features & NETIF_F_RXCSUM))
1905 		return;
1906 
1907 	/* check if hardware has done checksum */
1908 	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1909 		return;
1910 
1911 	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1912 		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1913 		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1914 		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1915 		netdev_err(netdev, "L3/L4 error pkt\n");
1916 		u64_stats_update_begin(&ring->syncp);
1917 		ring->stats.l3l4_csum_err++;
1918 		u64_stats_update_end(&ring->syncp);
1919 
1920 		return;
1921 	}
1922 
1923 	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1924 				 HNS3_RXD_L3ID_S);
1925 	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1926 				 HNS3_RXD_L4ID_S);
1927 
1928 	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1929 	switch (ol4_type) {
1930 	case HNS3_OL4_TYPE_MAC_IN_UDP:
1931 	case HNS3_OL4_TYPE_NVGRE:
1932 		skb->csum_level = 1;
1933 	case HNS3_OL4_TYPE_NO_TUN:
1934 		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1935 		if (l3_type == HNS3_L3_TYPE_IPV4 ||
1936 		    (l3_type == HNS3_L3_TYPE_IPV6 &&
1937 		     (l4_type == HNS3_L4_TYPE_UDP ||
1938 		      l4_type == HNS3_L4_TYPE_TCP ||
1939 		      l4_type == HNS3_L4_TYPE_SCTP)))
1940 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1941 		break;
1942 	}
1943 }
1944 
hns3_handle_rx_bd(struct hns3_enet_ring * ring,struct sk_buff ** out_skb,int * out_bnum)1945 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1946 			     struct sk_buff **out_skb, int *out_bnum)
1947 {
1948 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1949 	struct hns3_desc_cb *desc_cb;
1950 	struct hns3_desc *desc;
1951 	struct sk_buff *skb;
1952 	unsigned char *va;
1953 	u32 bd_base_info;
1954 	int pull_len;
1955 	u32 l234info;
1956 	int length;
1957 	int bnum;
1958 
1959 	desc = &ring->desc[ring->next_to_clean];
1960 	desc_cb = &ring->desc_cb[ring->next_to_clean];
1961 
1962 	prefetch(desc);
1963 
1964 	length = le16_to_cpu(desc->rx.pkt_len);
1965 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1966 	l234info = le32_to_cpu(desc->rx.l234_info);
1967 
1968 	/* Check valid BD */
1969 	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1970 		return -EFAULT;
1971 
1972 	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1973 
1974 	/* Prefetch first cache line of first page
1975 	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
1976 	 * line size is 64B so need to prefetch twice to make it 128B. But in
1977 	 * actual we can have greater size of caches with 128B Level 1 cache
1978 	 * lines. In such a case, single fetch would suffice to cache in the
1979 	 * relevant part of the header.
1980 	 */
1981 	prefetch(va);
1982 #if L1_CACHE_BYTES < 128
1983 	prefetch(va + L1_CACHE_BYTES);
1984 #endif
1985 
1986 	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1987 					HNS3_RX_HEAD_SIZE);
1988 	if (unlikely(!skb)) {
1989 		netdev_err(netdev, "alloc rx skb fail\n");
1990 
1991 		u64_stats_update_begin(&ring->syncp);
1992 		ring->stats.sw_err_cnt++;
1993 		u64_stats_update_end(&ring->syncp);
1994 
1995 		return -ENOMEM;
1996 	}
1997 
1998 	prefetchw(skb->data);
1999 
2000 	bnum = 1;
2001 	if (length <= HNS3_RX_HEAD_SIZE) {
2002 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2003 
2004 		/* We can reuse buffer as-is, just make sure it is local */
2005 		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2006 			desc_cb->reuse_flag = 1;
2007 		else /* This page cannot be reused so discard it */
2008 			put_page(desc_cb->priv);
2009 
2010 		ring_ptr_move_fw(ring, next_to_clean);
2011 	} else {
2012 		u64_stats_update_begin(&ring->syncp);
2013 		ring->stats.seg_pkt_cnt++;
2014 		u64_stats_update_end(&ring->syncp);
2015 
2016 		pull_len = hns3_nic_get_headlen(va, l234info,
2017 						HNS3_RX_HEAD_SIZE);
2018 		memcpy(__skb_put(skb, pull_len), va,
2019 		       ALIGN(pull_len, sizeof(long)));
2020 
2021 		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2022 		ring_ptr_move_fw(ring, next_to_clean);
2023 
2024 		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2025 			desc = &ring->desc[ring->next_to_clean];
2026 			desc_cb = &ring->desc_cb[ring->next_to_clean];
2027 			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2028 			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2029 			ring_ptr_move_fw(ring, next_to_clean);
2030 			bnum++;
2031 		}
2032 	}
2033 
2034 	*out_bnum = bnum;
2035 
2036 	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2037 		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2038 			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
2039 		u64_stats_update_begin(&ring->syncp);
2040 		ring->stats.non_vld_descs++;
2041 		u64_stats_update_end(&ring->syncp);
2042 
2043 		dev_kfree_skb_any(skb);
2044 		return -EINVAL;
2045 	}
2046 
2047 	if (unlikely((!desc->rx.pkt_len) ||
2048 		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2049 		netdev_err(netdev, "truncated pkt\n");
2050 		u64_stats_update_begin(&ring->syncp);
2051 		ring->stats.err_pkt_len++;
2052 		u64_stats_update_end(&ring->syncp);
2053 
2054 		dev_kfree_skb_any(skb);
2055 		return -EFAULT;
2056 	}
2057 
2058 	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2059 		netdev_err(netdev, "L2 error pkt\n");
2060 		u64_stats_update_begin(&ring->syncp);
2061 		ring->stats.l2_err++;
2062 		u64_stats_update_end(&ring->syncp);
2063 
2064 		dev_kfree_skb_any(skb);
2065 		return -EFAULT;
2066 	}
2067 
2068 	u64_stats_update_begin(&ring->syncp);
2069 	ring->stats.rx_pkts++;
2070 	ring->stats.rx_bytes += skb->len;
2071 	u64_stats_update_end(&ring->syncp);
2072 
2073 	ring->tqp_vector->rx_group.total_bytes += skb->len;
2074 
2075 	hns3_rx_checksum(ring, skb, desc);
2076 	return 0;
2077 }
2078 
hns3_clean_rx_ring(struct hns3_enet_ring * ring,int budget)2079 static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2080 {
2081 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2082 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2083 	int recv_pkts, recv_bds, clean_count, err;
2084 	int unused_count = hns3_desc_unused(ring);
2085 	struct sk_buff *skb = NULL;
2086 	int num, bnum = 0;
2087 
2088 	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2089 	rmb(); /* Make sure num taken effect before the other data is touched */
2090 
2091 	recv_pkts = 0, recv_bds = 0, clean_count = 0;
2092 	num -= unused_count;
2093 
2094 	while (recv_pkts < budget && recv_bds < num) {
2095 		/* Reuse or realloc buffers */
2096 		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2097 			hns3_nic_alloc_rx_buffers(ring,
2098 						  clean_count + unused_count);
2099 			clean_count = 0;
2100 			unused_count = hns3_desc_unused(ring);
2101 		}
2102 
2103 		/* Poll one pkt */
2104 		err = hns3_handle_rx_bd(ring, &skb, &bnum);
2105 		if (unlikely(!skb)) /* This fault cannot be repaired */
2106 			goto out;
2107 
2108 		recv_bds += bnum;
2109 		clean_count += bnum;
2110 		if (unlikely(err)) {  /* Do jump the err */
2111 			recv_pkts++;
2112 			continue;
2113 		}
2114 
2115 		/* Do update ip stack process */
2116 		skb->protocol = eth_type_trans(skb, netdev);
2117 		(void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2118 
2119 		recv_pkts++;
2120 	}
2121 
2122 out:
2123 	/* Make all data has been write before submit */
2124 	if (clean_count + unused_count > 0)
2125 		hns3_nic_alloc_rx_buffers(ring,
2126 					  clean_count + unused_count);
2127 
2128 	return recv_pkts;
2129 }
2130 
hns3_get_new_int_gl(struct hns3_enet_ring_group * ring_group)2131 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2132 {
2133 #define HNS3_RX_ULTRA_PACKET_RATE 40000
2134 	enum hns3_flow_level_range new_flow_level;
2135 	struct hns3_enet_tqp_vector *tqp_vector;
2136 	int packets_per_secs;
2137 	int bytes_per_usecs;
2138 	u16 new_int_gl;
2139 	int usecs;
2140 
2141 	if (!ring_group->int_gl)
2142 		return false;
2143 
2144 	if (ring_group->total_packets == 0) {
2145 		ring_group->int_gl = HNS3_INT_GL_50K;
2146 		ring_group->flow_level = HNS3_FLOW_LOW;
2147 		return true;
2148 	}
2149 
2150 	/* Simple throttlerate management
2151 	 * 0-10MB/s   lower     (50000 ints/s)
2152 	 * 10-20MB/s   middle    (20000 ints/s)
2153 	 * 20-1249MB/s high      (18000 ints/s)
2154 	 * > 40000pps  ultra     (8000 ints/s)
2155 	 */
2156 	new_flow_level = ring_group->flow_level;
2157 	new_int_gl = ring_group->int_gl;
2158 	tqp_vector = ring_group->ring->tqp_vector;
2159 	usecs = (ring_group->int_gl << 1);
2160 	bytes_per_usecs = ring_group->total_bytes / usecs;
2161 	/* 1000000 microseconds */
2162 	packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2163 
2164 	switch (new_flow_level) {
2165 	case HNS3_FLOW_LOW:
2166 		if (bytes_per_usecs > 10)
2167 			new_flow_level = HNS3_FLOW_MID;
2168 		break;
2169 	case HNS3_FLOW_MID:
2170 		if (bytes_per_usecs > 20)
2171 			new_flow_level = HNS3_FLOW_HIGH;
2172 		else if (bytes_per_usecs <= 10)
2173 			new_flow_level = HNS3_FLOW_LOW;
2174 		break;
2175 	case HNS3_FLOW_HIGH:
2176 	case HNS3_FLOW_ULTRA:
2177 	default:
2178 		if (bytes_per_usecs <= 20)
2179 			new_flow_level = HNS3_FLOW_MID;
2180 		break;
2181 	}
2182 
2183 	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2184 	    (&tqp_vector->rx_group == ring_group))
2185 		new_flow_level = HNS3_FLOW_ULTRA;
2186 
2187 	switch (new_flow_level) {
2188 	case HNS3_FLOW_LOW:
2189 		new_int_gl = HNS3_INT_GL_50K;
2190 		break;
2191 	case HNS3_FLOW_MID:
2192 		new_int_gl = HNS3_INT_GL_20K;
2193 		break;
2194 	case HNS3_FLOW_HIGH:
2195 		new_int_gl = HNS3_INT_GL_18K;
2196 		break;
2197 	case HNS3_FLOW_ULTRA:
2198 		new_int_gl = HNS3_INT_GL_8K;
2199 		break;
2200 	default:
2201 		break;
2202 	}
2203 
2204 	ring_group->total_bytes = 0;
2205 	ring_group->total_packets = 0;
2206 	ring_group->flow_level = new_flow_level;
2207 	if (new_int_gl != ring_group->int_gl) {
2208 		ring_group->int_gl = new_int_gl;
2209 		return true;
2210 	}
2211 	return false;
2212 }
2213 
hns3_update_new_int_gl(struct hns3_enet_tqp_vector * tqp_vector)2214 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2215 {
2216 	u16 rx_int_gl, tx_int_gl;
2217 	bool rx, tx;
2218 
2219 	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2220 	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2221 	rx_int_gl = tqp_vector->rx_group.int_gl;
2222 	tx_int_gl = tqp_vector->tx_group.int_gl;
2223 	if (rx && tx) {
2224 		if (rx_int_gl > tx_int_gl) {
2225 			tqp_vector->tx_group.int_gl = rx_int_gl;
2226 			tqp_vector->tx_group.flow_level =
2227 				tqp_vector->rx_group.flow_level;
2228 			hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2229 		} else {
2230 			tqp_vector->rx_group.int_gl = tx_int_gl;
2231 			tqp_vector->rx_group.flow_level =
2232 				tqp_vector->tx_group.flow_level;
2233 			hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2234 		}
2235 	}
2236 }
2237 
hns3_nic_common_poll(struct napi_struct * napi,int budget)2238 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2239 {
2240 	struct hns3_enet_ring *ring;
2241 	int rx_pkt_total = 0;
2242 
2243 	struct hns3_enet_tqp_vector *tqp_vector =
2244 		container_of(napi, struct hns3_enet_tqp_vector, napi);
2245 	bool clean_complete = true;
2246 	int rx_budget;
2247 
2248 	/* Since the actual Tx work is minimal, we can give the Tx a larger
2249 	 * budget and be more aggressive about cleaning up the Tx descriptors.
2250 	 */
2251 	hns3_for_each_ring(ring, tqp_vector->tx_group) {
2252 		if (!hns3_clean_tx_ring(ring, budget))
2253 			clean_complete = false;
2254 	}
2255 
2256 	/* make sure rx ring budget not smaller than 1 */
2257 	rx_budget = max(budget / tqp_vector->num_tqps, 1);
2258 
2259 	hns3_for_each_ring(ring, tqp_vector->rx_group) {
2260 		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2261 
2262 		if (rx_cleaned >= rx_budget)
2263 			clean_complete = false;
2264 
2265 		rx_pkt_total += rx_cleaned;
2266 	}
2267 
2268 	tqp_vector->rx_group.total_packets += rx_pkt_total;
2269 
2270 	if (!clean_complete)
2271 		return budget;
2272 
2273 	napi_complete(napi);
2274 	hns3_update_new_int_gl(tqp_vector);
2275 	hns3_mask_vector_irq(tqp_vector, 1);
2276 
2277 	return rx_pkt_total;
2278 }
2279 
hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector * tqp_vector,struct hnae3_ring_chain_node * head)2280 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2281 				      struct hnae3_ring_chain_node *head)
2282 {
2283 	struct pci_dev *pdev = tqp_vector->handle->pdev;
2284 	struct hnae3_ring_chain_node *cur_chain = head;
2285 	struct hnae3_ring_chain_node *chain;
2286 	struct hns3_enet_ring *tx_ring;
2287 	struct hns3_enet_ring *rx_ring;
2288 
2289 	tx_ring = tqp_vector->tx_group.ring;
2290 	if (tx_ring) {
2291 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2292 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2293 			     HNAE3_RING_TYPE_TX);
2294 
2295 		cur_chain->next = NULL;
2296 
2297 		while (tx_ring->next) {
2298 			tx_ring = tx_ring->next;
2299 
2300 			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2301 					     GFP_KERNEL);
2302 			if (!chain)
2303 				goto err_free_chain;
2304 
2305 			cur_chain->next = chain;
2306 			chain->tqp_index = tx_ring->tqp->tqp_index;
2307 			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2308 				     HNAE3_RING_TYPE_TX);
2309 
2310 			cur_chain = chain;
2311 		}
2312 	}
2313 
2314 	rx_ring = tqp_vector->rx_group.ring;
2315 	if (!tx_ring && rx_ring) {
2316 		cur_chain->next = NULL;
2317 		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2318 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2319 			     HNAE3_RING_TYPE_RX);
2320 
2321 		rx_ring = rx_ring->next;
2322 	}
2323 
2324 	while (rx_ring) {
2325 		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2326 		if (!chain)
2327 			goto err_free_chain;
2328 
2329 		cur_chain->next = chain;
2330 		chain->tqp_index = rx_ring->tqp->tqp_index;
2331 		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2332 			     HNAE3_RING_TYPE_RX);
2333 		cur_chain = chain;
2334 
2335 		rx_ring = rx_ring->next;
2336 	}
2337 
2338 	return 0;
2339 
2340 err_free_chain:
2341 	cur_chain = head->next;
2342 	while (cur_chain) {
2343 		chain = cur_chain->next;
2344 		devm_kfree(&pdev->dev, chain);
2345 		cur_chain = chain;
2346 	}
2347 
2348 	return -ENOMEM;
2349 }
2350 
hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector * tqp_vector,struct hnae3_ring_chain_node * head)2351 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2352 					struct hnae3_ring_chain_node *head)
2353 {
2354 	struct pci_dev *pdev = tqp_vector->handle->pdev;
2355 	struct hnae3_ring_chain_node *chain_tmp, *chain;
2356 
2357 	chain = head->next;
2358 
2359 	while (chain) {
2360 		chain_tmp = chain->next;
2361 		devm_kfree(&pdev->dev, chain);
2362 		chain = chain_tmp;
2363 	}
2364 }
2365 
hns3_add_ring_to_group(struct hns3_enet_ring_group * group,struct hns3_enet_ring * ring)2366 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2367 				   struct hns3_enet_ring *ring)
2368 {
2369 	ring->next = group->ring;
2370 	group->ring = ring;
2371 
2372 	group->count++;
2373 }
2374 
hns3_nic_init_vector_data(struct hns3_nic_priv * priv)2375 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2376 {
2377 	struct hnae3_ring_chain_node vector_ring_chain;
2378 	struct hnae3_handle *h = priv->ae_handle;
2379 	struct hns3_enet_tqp_vector *tqp_vector;
2380 	struct hnae3_vector_info *vector;
2381 	struct pci_dev *pdev = h->pdev;
2382 	u16 tqp_num = h->kinfo.num_tqps;
2383 	u16 vector_num;
2384 	int ret = 0;
2385 	u16 i;
2386 
2387 	/* RSS size, cpu online and vector_num should be the same */
2388 	/* Should consider 2p/4p later */
2389 	vector_num = min_t(u16, num_online_cpus(), tqp_num);
2390 	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2391 			      GFP_KERNEL);
2392 	if (!vector)
2393 		return -ENOMEM;
2394 
2395 	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2396 
2397 	priv->vector_num = vector_num;
2398 	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2399 		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2400 			     GFP_KERNEL);
2401 	if (!priv->tqp_vector)
2402 		return -ENOMEM;
2403 
2404 	for (i = 0; i < tqp_num; i++) {
2405 		u16 vector_i = i % vector_num;
2406 
2407 		tqp_vector = &priv->tqp_vector[vector_i];
2408 
2409 		hns3_add_ring_to_group(&tqp_vector->tx_group,
2410 				       priv->ring_data[i].ring);
2411 
2412 		hns3_add_ring_to_group(&tqp_vector->rx_group,
2413 				       priv->ring_data[i + tqp_num].ring);
2414 
2415 		tqp_vector->idx = vector_i;
2416 		tqp_vector->mask_addr = vector[vector_i].io_addr;
2417 		tqp_vector->vector_irq = vector[vector_i].vector;
2418 		tqp_vector->num_tqps++;
2419 
2420 		priv->ring_data[i].ring->tqp_vector = tqp_vector;
2421 		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2422 	}
2423 
2424 	for (i = 0; i < vector_num; i++) {
2425 		tqp_vector = &priv->tqp_vector[i];
2426 
2427 		tqp_vector->rx_group.total_bytes = 0;
2428 		tqp_vector->rx_group.total_packets = 0;
2429 		tqp_vector->tx_group.total_bytes = 0;
2430 		tqp_vector->tx_group.total_packets = 0;
2431 		hns3_vector_gl_rl_init(tqp_vector);
2432 		tqp_vector->handle = h;
2433 
2434 		ret = hns3_get_vector_ring_chain(tqp_vector,
2435 						 &vector_ring_chain);
2436 		if (ret)
2437 			goto out;
2438 
2439 		ret = h->ae_algo->ops->map_ring_to_vector(h,
2440 			tqp_vector->vector_irq, &vector_ring_chain);
2441 		if (ret)
2442 			goto out;
2443 
2444 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2445 
2446 		netif_napi_add(priv->netdev, &tqp_vector->napi,
2447 			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2448 	}
2449 
2450 out:
2451 	devm_kfree(&pdev->dev, vector);
2452 	return ret;
2453 }
2454 
hns3_nic_uninit_vector_data(struct hns3_nic_priv * priv)2455 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2456 {
2457 	struct hnae3_ring_chain_node vector_ring_chain;
2458 	struct hnae3_handle *h = priv->ae_handle;
2459 	struct hns3_enet_tqp_vector *tqp_vector;
2460 	struct pci_dev *pdev = h->pdev;
2461 	int i, ret;
2462 
2463 	for (i = 0; i < priv->vector_num; i++) {
2464 		tqp_vector = &priv->tqp_vector[i];
2465 
2466 		ret = hns3_get_vector_ring_chain(tqp_vector,
2467 						 &vector_ring_chain);
2468 		if (ret)
2469 			return ret;
2470 
2471 		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2472 			tqp_vector->vector_irq, &vector_ring_chain);
2473 		if (ret)
2474 			return ret;
2475 
2476 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2477 
2478 		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2479 			(void)irq_set_affinity_hint(
2480 				priv->tqp_vector[i].vector_irq,
2481 						    NULL);
2482 			free_irq(priv->tqp_vector[i].vector_irq,
2483 				 &priv->tqp_vector[i]);
2484 		}
2485 
2486 		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2487 
2488 		netif_napi_del(&priv->tqp_vector[i].napi);
2489 	}
2490 
2491 	devm_kfree(&pdev->dev, priv->tqp_vector);
2492 
2493 	return 0;
2494 }
2495 
hns3_ring_get_cfg(struct hnae3_queue * q,struct hns3_nic_priv * priv,int ring_type)2496 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2497 			     int ring_type)
2498 {
2499 	struct hns3_nic_ring_data *ring_data = priv->ring_data;
2500 	int queue_num = priv->ae_handle->kinfo.num_tqps;
2501 	struct pci_dev *pdev = priv->ae_handle->pdev;
2502 	struct hns3_enet_ring *ring;
2503 
2504 	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2505 	if (!ring)
2506 		return -ENOMEM;
2507 
2508 	if (ring_type == HNAE3_RING_TYPE_TX) {
2509 		ring_data[q->tqp_index].ring = ring;
2510 		ring_data[q->tqp_index].queue_index = q->tqp_index;
2511 		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2512 	} else {
2513 		ring_data[q->tqp_index + queue_num].ring = ring;
2514 		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2515 		ring->io_base = q->io_base;
2516 	}
2517 
2518 	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2519 
2520 	ring->tqp = q;
2521 	ring->desc = NULL;
2522 	ring->desc_cb = NULL;
2523 	ring->dev = priv->dev;
2524 	ring->desc_dma_addr = 0;
2525 	ring->buf_size = q->buf_size;
2526 	ring->desc_num = q->desc_num;
2527 	ring->next_to_use = 0;
2528 	ring->next_to_clean = 0;
2529 
2530 	return 0;
2531 }
2532 
hns3_queue_to_ring(struct hnae3_queue * tqp,struct hns3_nic_priv * priv)2533 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2534 			      struct hns3_nic_priv *priv)
2535 {
2536 	int ret;
2537 
2538 	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2539 	if (ret)
2540 		return ret;
2541 
2542 	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2543 	if (ret) {
2544 		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
2545 		return ret;
2546 	}
2547 
2548 	return 0;
2549 }
2550 
hns3_get_ring_config(struct hns3_nic_priv * priv)2551 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2552 {
2553 	struct hnae3_handle *h = priv->ae_handle;
2554 	struct pci_dev *pdev = h->pdev;
2555 	int i, ret;
2556 
2557 	priv->ring_data =  devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2558 					sizeof(*priv->ring_data) * 2,
2559 					GFP_KERNEL);
2560 	if (!priv->ring_data)
2561 		return -ENOMEM;
2562 
2563 	for (i = 0; i < h->kinfo.num_tqps; i++) {
2564 		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2565 		if (ret)
2566 			goto err;
2567 	}
2568 
2569 	return 0;
2570 err:
2571 	while (i--) {
2572 		devm_kfree(priv->dev, priv->ring_data[i].ring);
2573 		devm_kfree(priv->dev,
2574 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
2575 	}
2576 
2577 	devm_kfree(&pdev->dev, priv->ring_data);
2578 	return ret;
2579 }
2580 
hns3_alloc_ring_memory(struct hns3_enet_ring * ring)2581 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2582 {
2583 	int ret;
2584 
2585 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
2586 		return -EINVAL;
2587 
2588 	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2589 				GFP_KERNEL);
2590 	if (!ring->desc_cb) {
2591 		ret = -ENOMEM;
2592 		goto out;
2593 	}
2594 
2595 	ret = hns3_alloc_desc(ring);
2596 	if (ret)
2597 		goto out_with_desc_cb;
2598 
2599 	if (!HNAE3_IS_TX_RING(ring)) {
2600 		ret = hns3_alloc_ring_buffers(ring);
2601 		if (ret)
2602 			goto out_with_desc;
2603 	}
2604 
2605 	return 0;
2606 
2607 out_with_desc:
2608 	hns3_free_desc(ring);
2609 out_with_desc_cb:
2610 	kfree(ring->desc_cb);
2611 	ring->desc_cb = NULL;
2612 out:
2613 	return ret;
2614 }
2615 
hns3_fini_ring(struct hns3_enet_ring * ring)2616 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2617 {
2618 	hns3_free_desc(ring);
2619 	kfree(ring->desc_cb);
2620 	ring->desc_cb = NULL;
2621 	ring->next_to_clean = 0;
2622 	ring->next_to_use = 0;
2623 }
2624 
hns3_buf_size2type(u32 buf_size)2625 int hns3_buf_size2type(u32 buf_size)
2626 {
2627 	int bd_size_type;
2628 
2629 	switch (buf_size) {
2630 	case 512:
2631 		bd_size_type = HNS3_BD_SIZE_512_TYPE;
2632 		break;
2633 	case 1024:
2634 		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2635 		break;
2636 	case 2048:
2637 		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2638 		break;
2639 	case 4096:
2640 		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2641 		break;
2642 	default:
2643 		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2644 	}
2645 
2646 	return bd_size_type;
2647 }
2648 
hns3_init_ring_hw(struct hns3_enet_ring * ring)2649 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2650 {
2651 	dma_addr_t dma = ring->desc_dma_addr;
2652 	struct hnae3_queue *q = ring->tqp;
2653 
2654 	if (!HNAE3_IS_TX_RING(ring)) {
2655 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2656 			       (u32)dma);
2657 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2658 			       (u32)((dma >> 31) >> 1));
2659 
2660 		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2661 			       hns3_buf_size2type(ring->buf_size));
2662 		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2663 			       ring->desc_num / 8 - 1);
2664 
2665 	} else {
2666 		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2667 			       (u32)dma);
2668 		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2669 			       (u32)((dma >> 31) >> 1));
2670 
2671 		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2672 			       hns3_buf_size2type(ring->buf_size));
2673 		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2674 			       ring->desc_num / 8 - 1);
2675 	}
2676 }
2677 
hns3_init_all_ring(struct hns3_nic_priv * priv)2678 static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2679 {
2680 	struct hnae3_handle *h = priv->ae_handle;
2681 	int ring_num = h->kinfo.num_tqps * 2;
2682 	int i, j;
2683 	int ret;
2684 
2685 	for (i = 0; i < ring_num; i++) {
2686 		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2687 		if (ret) {
2688 			dev_err(priv->dev,
2689 				"Alloc ring memory fail! ret=%d\n", ret);
2690 			goto out_when_alloc_ring_memory;
2691 		}
2692 
2693 		hns3_init_ring_hw(priv->ring_data[i].ring);
2694 
2695 		u64_stats_init(&priv->ring_data[i].ring->syncp);
2696 	}
2697 
2698 	return 0;
2699 
2700 out_when_alloc_ring_memory:
2701 	for (j = i - 1; j >= 0; j--)
2702 		hns3_fini_ring(priv->ring_data[i].ring);
2703 
2704 	return -ENOMEM;
2705 }
2706 
hns3_uninit_all_ring(struct hns3_nic_priv * priv)2707 static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2708 {
2709 	struct hnae3_handle *h = priv->ae_handle;
2710 	int i;
2711 
2712 	for (i = 0; i < h->kinfo.num_tqps; i++) {
2713 		if (h->ae_algo->ops->reset_queue)
2714 			h->ae_algo->ops->reset_queue(h, i);
2715 
2716 		hns3_fini_ring(priv->ring_data[i].ring);
2717 		devm_kfree(priv->dev, priv->ring_data[i].ring);
2718 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2719 		devm_kfree(priv->dev,
2720 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
2721 	}
2722 	devm_kfree(priv->dev, priv->ring_data);
2723 
2724 	return 0;
2725 }
2726 
2727 /* Set mac addr if it is configured. or leave it to the AE driver */
hns3_init_mac_addr(struct net_device * netdev)2728 static void hns3_init_mac_addr(struct net_device *netdev)
2729 {
2730 	struct hns3_nic_priv *priv = netdev_priv(netdev);
2731 	struct hnae3_handle *h = priv->ae_handle;
2732 	u8 mac_addr_temp[ETH_ALEN];
2733 
2734 	if (h->ae_algo->ops->get_mac_addr) {
2735 		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2736 		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2737 	}
2738 
2739 	/* Check if the MAC address is valid, if not get a random one */
2740 	if (!is_valid_ether_addr(netdev->dev_addr)) {
2741 		eth_hw_addr_random(netdev);
2742 		dev_warn(priv->dev, "using random MAC address %pM\n",
2743 			 netdev->dev_addr);
2744 	}
2745 
2746 	if (h->ae_algo->ops->set_mac_addr)
2747 		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2748 
2749 }
2750 
hns3_nic_set_priv_ops(struct net_device * netdev)2751 static void hns3_nic_set_priv_ops(struct net_device *netdev)
2752 {
2753 	struct hns3_nic_priv *priv = netdev_priv(netdev);
2754 
2755 	if ((netdev->features & NETIF_F_TSO) ||
2756 	    (netdev->features & NETIF_F_TSO6)) {
2757 		priv->ops.fill_desc = hns3_fill_desc_tso;
2758 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2759 	} else {
2760 		priv->ops.fill_desc = hns3_fill_desc;
2761 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2762 	}
2763 }
2764 
hns3_client_init(struct hnae3_handle * handle)2765 static int hns3_client_init(struct hnae3_handle *handle)
2766 {
2767 	struct pci_dev *pdev = handle->pdev;
2768 	struct hns3_nic_priv *priv;
2769 	struct net_device *netdev;
2770 	int ret;
2771 
2772 	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2773 				   handle->kinfo.num_tqps);
2774 	if (!netdev)
2775 		return -ENOMEM;
2776 
2777 	priv = netdev_priv(netdev);
2778 	priv->dev = &pdev->dev;
2779 	priv->netdev = netdev;
2780 	priv->ae_handle = handle;
2781 
2782 	handle->kinfo.netdev = netdev;
2783 	handle->priv = (void *)priv;
2784 
2785 	hns3_init_mac_addr(netdev);
2786 
2787 	hns3_set_default_feature(netdev);
2788 
2789 	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2790 	netdev->priv_flags |= IFF_UNICAST_FLT;
2791 	netdev->netdev_ops = &hns3_nic_netdev_ops;
2792 	SET_NETDEV_DEV(netdev, &pdev->dev);
2793 	hns3_ethtool_set_ops(netdev);
2794 	hns3_nic_set_priv_ops(netdev);
2795 
2796 	/* Carrier off reporting is important to ethtool even BEFORE open */
2797 	netif_carrier_off(netdev);
2798 
2799 	ret = hns3_get_ring_config(priv);
2800 	if (ret) {
2801 		ret = -ENOMEM;
2802 		goto out_get_ring_cfg;
2803 	}
2804 
2805 	ret = hns3_nic_init_vector_data(priv);
2806 	if (ret) {
2807 		ret = -ENOMEM;
2808 		goto out_init_vector_data;
2809 	}
2810 
2811 	ret = hns3_init_all_ring(priv);
2812 	if (ret) {
2813 		ret = -ENOMEM;
2814 		goto out_init_ring_data;
2815 	}
2816 
2817 	ret = register_netdev(netdev);
2818 	if (ret) {
2819 		dev_err(priv->dev, "probe register netdev fail!\n");
2820 		goto out_reg_netdev_fail;
2821 	}
2822 
2823 	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2824 	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2825 
2826 	return ret;
2827 
2828 out_reg_netdev_fail:
2829 out_init_ring_data:
2830 	(void)hns3_nic_uninit_vector_data(priv);
2831 	priv->ring_data = NULL;
2832 out_init_vector_data:
2833 out_get_ring_cfg:
2834 	priv->ae_handle = NULL;
2835 	free_netdev(netdev);
2836 	return ret;
2837 }
2838 
hns3_client_uninit(struct hnae3_handle * handle,bool reset)2839 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2840 {
2841 	struct net_device *netdev = handle->kinfo.netdev;
2842 	struct hns3_nic_priv *priv = netdev_priv(netdev);
2843 	int ret;
2844 
2845 	if (netdev->reg_state != NETREG_UNINITIALIZED)
2846 		unregister_netdev(netdev);
2847 
2848 	ret = hns3_nic_uninit_vector_data(priv);
2849 	if (ret)
2850 		netdev_err(netdev, "uninit vector error\n");
2851 
2852 	ret = hns3_uninit_all_ring(priv);
2853 	if (ret)
2854 		netdev_err(netdev, "uninit ring error\n");
2855 
2856 	priv->ring_data = NULL;
2857 
2858 	free_netdev(netdev);
2859 }
2860 
hns3_link_status_change(struct hnae3_handle * handle,bool linkup)2861 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2862 {
2863 	struct net_device *netdev = handle->kinfo.netdev;
2864 
2865 	if (!netdev)
2866 		return;
2867 
2868 	if (linkup) {
2869 		netif_carrier_on(netdev);
2870 		netif_tx_wake_all_queues(netdev);
2871 		netdev_info(netdev, "link up\n");
2872 	} else {
2873 		netif_carrier_off(netdev);
2874 		netif_tx_stop_all_queues(netdev);
2875 		netdev_info(netdev, "link down\n");
2876 	}
2877 }
2878 
2879 const struct hnae3_client_ops client_ops = {
2880 	.init_instance = hns3_client_init,
2881 	.uninit_instance = hns3_client_uninit,
2882 	.link_status_change = hns3_link_status_change,
2883 };
2884 
2885 /* hns3_init_module - Driver registration routine
2886  * hns3_init_module is the first routine called when the driver is
2887  * loaded. All it does is register with the PCI subsystem.
2888  */
hns3_init_module(void)2889 static int __init hns3_init_module(void)
2890 {
2891 	int ret;
2892 
2893 	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2894 	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2895 
2896 	client.type = HNAE3_CLIENT_KNIC;
2897 	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2898 		 hns3_driver_name);
2899 
2900 	client.ops = &client_ops;
2901 
2902 	INIT_LIST_HEAD(&client.node);
2903 
2904 	ret = hnae3_register_client(&client);
2905 	if (ret)
2906 		return ret;
2907 
2908 	ret = pci_register_driver(&hns3_driver);
2909 	if (ret)
2910 		hnae3_unregister_client(&client);
2911 
2912 	return ret;
2913 }
2914 module_init(hns3_init_module);
2915 
2916 /* hns3_exit_module - Driver exit cleanup routine
2917  * hns3_exit_module is called just before the driver is removed
2918  * from memory.
2919  */
hns3_exit_module(void)2920 static void __exit hns3_exit_module(void)
2921 {
2922 	pci_unregister_driver(&hns3_driver);
2923 	hnae3_unregister_client(&client);
2924 }
2925 module_exit(hns3_exit_module);
2926 
2927 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2928 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2929 MODULE_LICENSE("GPL");
2930 MODULE_ALIAS("pci:hns-nic");
2931