/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H

#include <linux/if_vlan.h>

#include "hnae3.h"

enum hns3_nic_state {
	HNS3_NIC_STATE_TESTING,
	HNS3_NIC_STATE_RESETTING,
	HNS3_NIC_STATE_INITED,
	HNS3_NIC_STATE_DOWN,
	HNS3_NIC_STATE_DISABLED,
	HNS3_NIC_STATE_REMOVING,
	HNS3_NIC_STATE_SERVICE_INITED,
	HNS3_NIC_STATE_SERVICE_SCHED,
	HNS3_NIC_STATE2_RESET_REQUESTED,
	HNS3_NIC_STATE_MAX
};

#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C
#define HNS3_RING_RX_RING_TAIL_REG 0x00018
#define HNS3_RING_RX_RING_HEAD_REG 0x0001C
#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020
#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C

#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
#define HNS3_RING_TX_RING_TC_REG 0x00050
#define HNS3_RING_TX_RING_TAIL_REG 0x00058
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_RX_EN_REG 0x00098
#define HNS3_RING_TX_EN_REG 0x000D4

#define HNS3_RX_HEAD_SIZE 256

#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32760
#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
#define HNS3_MAX_MTU \
	(HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))

#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3

#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1

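/* bit fields of the RX descriptor 'l234_info' word */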
#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCAT_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31

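/* bit fields of the RX descriptor 'ol_info' word */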
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)

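/* bit fields of the RX descriptor 'bd_base_info' word */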
#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)

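/* bit fields of the TX descriptor 'type_cs_vlan_tso_len' word */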
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)

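/* bit fields of the TX descriptor 'ol_type_vlan_len_msec' word */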
#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)

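/* bit fields of the TX descriptor 'bdtp_fe_sc_vld_ra_ri' field */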
#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)

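/* MSS value programmed into the TX descriptor 'mss' field */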
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)

#define HNS3_VECTOR_NOT_INITED 0
#define HNS3_VECTOR_INITED 1

#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
#define HNS3_MAX_TSO_SIZE 1048576U
#define HNS3_MAX_NON_TSO_SIZE 9728U

#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
#define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6

#define HNS3_RING_EN_B 0

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct __packed hns3_desc {
	__le64 addr;
	union {
		struct {
			__le16 vlan_tag;
			__le16 send_size;
			union {
				__le32 type_cs_vlan_tso_len;
				struct {
					__u8 type_cs_vlan_tso;
					__u8 l2_len;
					__u8 l3_len;
					__u8 l4_len;
				};
			};
			__le16 outer_vlan_tag;
			__le16 tv;

			union {
				__le32 ol_type_vlan_len_msec;
				struct {
					__u8 ol_type_vlan_msec;
					__u8 ol2_len;
					__u8 ol3_len;
					__u8 ol4_len;
				};
			};

			__le32 paylen;
			__le16 bdtp_fe_sc_vld_ra_ri;
			__le16 mss;
		} tx;

		struct {
			__le32 l234_info;
			__le16 pkt_len;
			__le16 size;

			__le32 rss_hash;
			__le16 fd_id;
			__le16 vlan_tag;

			union {
				__le32 ol_info;
				struct {
					__le16 o_dm_vlan_id_fb;
					__le16 ot_vlan_tag;
				};
			};

			__le32 bd_base_info;
		} rx;
	};
};

struct hns3_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf; /* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when used with the IP stack */
	void *priv;
	u32 page_offset;
	u32 length; /* length of the buffer */

	u16 reuse_flag;
	u16 refill;

	/* desc type, used by the ring user to mark the type of the priv data */
	u16 type;
	u16 pagecnt_bias;
};

enum hns3_pkt_l3type {
	HNS3_L3_TYPE_IPV4,
	HNS3_L3_TYPE_IPV6,
	HNS3_L3_TYPE_ARP,
	HNS3_L3_TYPE_RARP,
	HNS3_L3_TYPE_IPV4_OPT,
	HNS3_L3_TYPE_IPV6_EXT,
	HNS3_L3_TYPE_LLDP,
	HNS3_L3_TYPE_BPDU,
	HNS3_L3_TYPE_MAC_PAUSE,
	HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */

	/* reserved for 0xA~0xB */

	HNS3_L3_TYPE_CNM = 0xc,

	/* reserved for 0xD~0xE */

	HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};

enum hns3_pkt_l4type {
	HNS3_L4_TYPE_UDP,
	HNS3_L4_TYPE_TCP,
	HNS3_L4_TYPE_GRE,
	HNS3_L4_TYPE_SCTP,
	HNS3_L4_TYPE_IGMP,
	HNS3_L4_TYPE_ICMP,

	/* reserved for 0x6~0xE */

	HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */
};

enum hns3_pkt_ol3type {
	HNS3_OL3_TYPE_IPV4 = 0,
	HNS3_OL3_TYPE_IPV6,
	/* reserved for 0x2~0x3 */
	HNS3_OL3_TYPE_IPV4_OPT = 4,
	HNS3_OL3_TYPE_IPV6_EXT,

	/* reserved for 0x6~0xE */

	HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};

enum hns3_pkt_ol4type {
	HNS3_OL4_TYPE_NO_TUN,
	HNS3_OL4_TYPE_MAC_IN_UDP,
	HNS3_OL4_TYPE_NVGRE,
	HNS3_OL4_TYPE_UNKNOWN
};

struct ring_stats {
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_more;
			u64 restart_queue;
			u64 tx_busy;
			u64 tx_copy;
			u64 tx_vlan_err;
			u64 tx_l4_proto_err;
			u64 tx_l2l3l4_err;
			u64 tx_tso_err;
			u64 over_max_recursion;
			u64 hw_limitation;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
			u64 rx_multicast;
			u64 non_reuse_pg;
		};
	};
};

struct hns3_enet_ring {
	struct hns3_desc *desc; /* dma map address space */
	struct hns3_desc_cb *desc_cb;
	struct hns3_enet_ring *next;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_queue *tqp;
	int queue_index;
	struct device *dev; /* will be used for DMA mapping of descriptors */

	/* statistic */
	struct ring_stats stats;
	struct u64_stats_sync syncp;

	dma_addr_t desc_dma_addr;
	u32 buf_size; /* size for hnae_desc->addr, preset by AE */
	u16 desc_num; /* total number of desc */
	int next_to_use; /* idx of next spare desc */

	/* idx of the latest sent desc; the ring is empty when this
	 * equals next_to_use
	 */
	int next_to_clean;
	union {
		int last_to_use; /* last idx used by xmit */
		u32 pull_len; /* memcpy len for current rx packet */
	};
	u32 frag_num;
	void *va; /* first buffer address for current packet */

	u32 flag; /* ring attribute */

	int pending_buf;
	struct sk_buff *skb;
	struct sk_buff *tail_skb;
} ____cacheline_internodealigned_in_smp;

enum hns3_flow_level_range {
	HNS3_FLOW_LOW = 0,
	HNS3_FLOW_MID = 1,
	HNS3_FLOW_HIGH = 2,
	HNS3_FLOW_ULTRA = 3,
};

#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
#define HNS3_INT_GL_18K 0x0036
#define HNS3_INT_GL_8K 0x007C

#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40

struct hns3_enet_coalesce {
	u16 int_gl;
	u8 gl_adapt_enable;
	enum hns3_flow_level_range flow_level;
};

struct hns3_enet_ring_group {
	/* list of rings in this group, chained via ring->next */
	struct hns3_enet_ring *ring;
	u64 total_bytes; /* total bytes processed this group */
	u64 total_packets; /* total packets processed this group */
	u16 count;
	struct hns3_enet_coalesce coal;
};

struct hns3_enet_tqp_vector {
	struct hnae3_handle *handle;
	u8 __iomem *mask_addr;
	int vector_irq;
	int irq_init_flag;

	u16 idx; /* index in the TQP vector array per handle. */

	struct napi_struct napi;

	struct hns3_enet_ring_group rx_group;
	struct hns3_enet_ring_group tx_group;

	cpumask_t affinity_mask;
	u16 num_tqps; /* total number of tqps in TQP vector */
	struct irq_affinity_notify affinity_notify;

	char name[HNAE3_INT_NAME_LEN];

	unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;

struct hns3_nic_priv {
	struct hnae3_handle *ae_handle;
	struct net_device *netdev;
	struct device *dev;

	/* ring buffer array managed by the nic: the first half of the
	 * array holds the TX rings, the second half the RX rings
	 */
	struct hns3_enet_ring *ring;
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 vector_num;
	u8 max_non_tso_bd_num;

	u64 tx_timeout_count;

	unsigned long state;

	struct hns3_enet_coalesce tx_coal;
	struct hns3_enet_coalesce rx_coal;
};

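/* convenience overlays for parsing a packet's L3/L4 headers, e.g. for
 * checksum and TSO offload
 */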
union l3_hdr_info {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union l4_hdr_info {
	struct tcphdr *tcp;
	struct udphdr *udp;
	struct gre_base_hdr *gre;
	unsigned char *hdr;
};

struct hns3_hw_error_info {
	enum hnae3_hw_error_type type;
	const char *msg;
};

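/* One descriptor is always left unused, so next_to_use == next_to_clean
 * means the ring is empty rather than full.
 */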
static inline int ring_space(struct hns3_enet_ring *ring)
{
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
	 */
	int begin = smp_load_acquire(&ring->next_to_clean);
	int end = READ_ONCE(ring->next_to_use);

	return ((end >= begin) ? (ring->desc_num - end + begin) :
			(begin - end)) - 1;
}

static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}

static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	writel(value, reg_addr + reg);
}

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, (reg))

static inline bool hns3_nic_resetting(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}

#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, (reg), (value))

#define ring_to_dev(ring) ((ring)->dev)

#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)

#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define hns3_buf_size(_ring) ((_ring)->buf_size)

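/* buffers larger than half a page are backed by an order-1 page
 * (only possible when PAGE_SIZE is smaller than 8192)
 */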
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->buf_size > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)

#define hns3_get_handle(ndev) \
	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)

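/* usec to register unit conversion: GL registers count in 2 usec steps,
 * RL registers in 4 usec steps
 */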
#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)

#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)

void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch);

void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value);

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);

#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif

void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
#endif