1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 /******************************************************************************
5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
6 ******************************************************************************/
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/types.h>
11 #include <linux/bitops.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/vmalloc.h>
16 #include <linux/string.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/tcp.h>
20 #include <linux/sctp.h>
21 #include <linux/ipv6.h>
22 #include <linux/slab.h>
23 #include <net/checksum.h>
24 #include <net/ip6_checksum.h>
25 #include <linux/ethtool.h>
26 #include <linux/if.h>
27 #include <linux/if_vlan.h>
28 #include <linux/prefetch.h>
29 #include <net/mpls.h>
30 #include <linux/bpf.h>
31 #include <linux/bpf_trace.h>
32 #include <linux/atomic.h>
33
34 #include "ixgbevf.h"
35
36 const char ixgbevf_driver_name[] = "ixgbevf";
37 static const char ixgbevf_driver_string[] =
38 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
39
40 #define DRV_VERSION "4.1.0-k"
41 const char ixgbevf_driver_version[] = DRV_VERSION;
42 static char ixgbevf_copyright[] =
43 "Copyright (c) 2009 - 2015 Intel Corporation.";
44
45 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
46 [board_82599_vf] = &ixgbevf_82599_vf_info,
47 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
48 [board_X540_vf] = &ixgbevf_X540_vf_info,
49 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
50 [board_X550_vf] = &ixgbevf_X550_vf_info,
51 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
52 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
53 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
54 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
55 };
56
57 /* ixgbevf_pci_tbl - PCI Device ID Table
58 *
59 * Wildcard entries (PCI_ANY_ID) should come last
60 * Last entry must be all 0s
61 *
62 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
63 * Class, Class Mask, private data (not used) }
64 */
65 static const struct pci_device_id ixgbevf_pci_tbl[] = {
66 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
67 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
68 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
70 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
74 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
75 /* required last entry */
76 {0, }
77 };
78 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
79
80 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
81 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
82 MODULE_LICENSE("GPL");
83 MODULE_VERSION(DRV_VERSION);
84
85 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
86 static int debug = -1;
87 module_param(debug, int, 0);
88 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
89
90 static struct workqueue_struct *ixgbevf_wq;
91
92 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
93 {
94 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
95 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
96 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
97 queue_work(ixgbevf_wq, &adapter->service_task);
98 }
99
100 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
101 {
102 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
103
104 /* flush memory to make sure state is correct before next watchdog */
105 smp_mb__before_atomic();
106 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
107 }
108
109 /* forward decls */
110 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
111 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
112 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
113 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
114 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
115 struct ixgbevf_rx_buffer *old_buff);
116
117 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
118 {
119 struct ixgbevf_adapter *adapter = hw->back;
120
121 if (!hw->hw_addr)
122 return;
123 hw->hw_addr = NULL;
124 dev_err(&adapter->pdev->dev, "Adapter removed\n");
125 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
126 ixgbevf_service_event_schedule(adapter);
127 }
128
129 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
130 {
131 u32 value;
132
133 /* The following check not only optimizes a bit by not
134 * performing a read on the status register when the
135 * register just read was a status register read that
136 * returned IXGBE_FAILED_READ_REG. It also blocks any
137 * potential recursion.
138 */
139 if (reg == IXGBE_VFSTATUS) {
140 ixgbevf_remove_adapter(hw);
141 return;
142 }
143 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
144 if (value == IXGBE_FAILED_READ_REG)
145 ixgbevf_remove_adapter(hw);
146 }
147
148 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
149 {
150 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
151 u32 value;
152
153 if (IXGBE_REMOVED(reg_addr))
154 return IXGBE_FAILED_READ_REG;
155 value = readl(reg_addr + reg);
156 if (unlikely(value == IXGBE_FAILED_READ_REG))
157 ixgbevf_check_remove(hw, reg);
158 return value;
159 }
160
161 /**
162 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
163 * @adapter: pointer to adapter struct
164 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
165 * @queue: queue to map the corresponding interrupt to
166 * @msix_vector: the vector to map to the corresponding queue
167 **/
168 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
169 u8 queue, u8 msix_vector)
170 {
171 u32 ivar, index;
172 struct ixgbe_hw *hw = &adapter->hw;
173
174 if (direction == -1) {
175 /* other causes */
176 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
177 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
178 ivar &= ~0xFF;
179 ivar |= msix_vector;
180 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
181 } else {
182 /* Tx or Rx causes */
183 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
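		/* each VTIVAR register holds four 8-bit entries (Rx and Tx
		 * causes for a pair of queues); compute the bit offset of
		 * the entry for this queue and direction
		 */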
184 index = ((16 * (queue & 1)) + (8 * direction));
185 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
186 ivar &= ~(0xFF << index);
187 ivar |= (msix_vector << index);
188 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
189 }
190 }
191
192 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
193 {
194 return ring->stats.packets;
195 }
196
197 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
198 {
199 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
200 struct ixgbe_hw *hw = &adapter->hw;
201
202 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
203 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
204
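	/* descriptors between the hardware head and the software tail are
	 * still pending; account for the indices having wrapped
	 */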
205 if (head != tail)
206 return (head < tail) ?
207 tail - head : (tail + ring->count - head);
208
209 return 0;
210 }
211
212 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
213 {
214 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
215 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
216 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
217
218 clear_check_for_tx_hang(tx_ring);
219
220 /* Check for a hung queue, but be thorough. This verifies
221 * that a transmit has been completed since the previous
222 * check AND there is at least one packet pending. The
223 * ARMED bit is set to indicate a potential hang.
224 */
225 if ((tx_done_old == tx_done) && tx_pending) {
226 /* make sure it is true for two checks in a row */
227 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
228 &tx_ring->state);
229 }
230 /* reset the countdown */
231 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
232
233 /* update completed stats and continue */
234 tx_ring->tx_stats.tx_done_old = tx_done;
235
236 return false;
237 }
238
239 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
240 {
241 /* Do the reset outside of interrupt context */
242 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
243 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
244 ixgbevf_service_event_schedule(adapter);
245 }
246 }
247
248 /**
249 * ixgbevf_tx_timeout - Respond to a Tx Hang
250 * @netdev: network interface device structure
251 **/
252 static void ixgbevf_tx_timeout(struct net_device *netdev)
253 {
254 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
255
256 ixgbevf_tx_timeout_reset(adapter);
257 }
258
259 /**
260 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
261 * @q_vector: board private structure
262 * @tx_ring: tx ring to clean
263 * @napi_budget: Used to determine if we are in netpoll
264 **/
265 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
266 struct ixgbevf_ring *tx_ring, int napi_budget)
267 {
268 struct ixgbevf_adapter *adapter = q_vector->adapter;
269 struct ixgbevf_tx_buffer *tx_buffer;
270 union ixgbe_adv_tx_desc *tx_desc;
271 unsigned int total_bytes = 0, total_packets = 0;
272 unsigned int budget = tx_ring->count / 2;
273 unsigned int i = tx_ring->next_to_clean;
274
275 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
276 return true;
277
278 tx_buffer = &tx_ring->tx_buffer_info[i];
279 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
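	/* track i as a negative offset from the end of the ring so the wrap
	 * check inside the loop reduces to a test for zero
	 */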
280 i -= tx_ring->count;
281
282 do {
283 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
284
285 /* if next_to_watch is not set then there is no work pending */
286 if (!eop_desc)
287 break;
288
289 /* prevent any other reads prior to eop_desc */
290 smp_rmb();
291
292 /* if DD is not set pending work has not been completed */
293 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
294 break;
295
296 /* clear next_to_watch to prevent false hangs */
297 tx_buffer->next_to_watch = NULL;
298
299 /* update the statistics for this packet */
300 total_bytes += tx_buffer->bytecount;
301 total_packets += tx_buffer->gso_segs;
302
303 /* free the skb */
304 if (ring_is_xdp(tx_ring))
305 page_frag_free(tx_buffer->data);
306 else
307 napi_consume_skb(tx_buffer->skb, napi_budget);
308
309 /* unmap skb header data */
310 dma_unmap_single(tx_ring->dev,
311 dma_unmap_addr(tx_buffer, dma),
312 dma_unmap_len(tx_buffer, len),
313 DMA_TO_DEVICE);
314
315 /* clear tx_buffer data */
316 dma_unmap_len_set(tx_buffer, len, 0);
317
318 /* unmap remaining buffers */
319 while (tx_desc != eop_desc) {
320 tx_buffer++;
321 tx_desc++;
322 i++;
323 if (unlikely(!i)) {
324 i -= tx_ring->count;
325 tx_buffer = tx_ring->tx_buffer_info;
326 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
327 }
328
329 /* unmap any remaining paged data */
330 if (dma_unmap_len(tx_buffer, len)) {
331 dma_unmap_page(tx_ring->dev,
332 dma_unmap_addr(tx_buffer, dma),
333 dma_unmap_len(tx_buffer, len),
334 DMA_TO_DEVICE);
335 dma_unmap_len_set(tx_buffer, len, 0);
336 }
337 }
338
339 /* move us one more past the eop_desc for start of next pkt */
340 tx_buffer++;
341 tx_desc++;
342 i++;
343 if (unlikely(!i)) {
344 i -= tx_ring->count;
345 tx_buffer = tx_ring->tx_buffer_info;
346 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
347 }
348
349 /* issue prefetch for next Tx descriptor */
350 prefetch(tx_desc);
351
352 /* update budget accounting */
353 budget--;
354 } while (likely(budget));
355
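	/* convert the negative offset back into a ring index before saving */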
356 i += tx_ring->count;
357 tx_ring->next_to_clean = i;
358 u64_stats_update_begin(&tx_ring->syncp);
359 tx_ring->stats.bytes += total_bytes;
360 tx_ring->stats.packets += total_packets;
361 u64_stats_update_end(&tx_ring->syncp);
362 q_vector->tx.total_bytes += total_bytes;
363 q_vector->tx.total_packets += total_packets;
364
365 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
366 struct ixgbe_hw *hw = &adapter->hw;
367 union ixgbe_adv_tx_desc *eop_desc;
368
369 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
370
371 pr_err("Detected Tx Unit Hang%s\n"
372 " Tx Queue <%d>\n"
373 " TDH, TDT <%x>, <%x>\n"
374 " next_to_use <%x>\n"
375 " next_to_clean <%x>\n"
376 "tx_buffer_info[next_to_clean]\n"
377 " next_to_watch <%p>\n"
378 " eop_desc->wb.status <%x>\n"
379 " time_stamp <%lx>\n"
380 " jiffies <%lx>\n",
381 ring_is_xdp(tx_ring) ? " XDP" : "",
382 tx_ring->queue_index,
383 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
384 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
385 tx_ring->next_to_use, i,
386 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
387 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
388
389 if (!ring_is_xdp(tx_ring))
390 netif_stop_subqueue(tx_ring->netdev,
391 tx_ring->queue_index);
392
393 /* schedule immediate reset if we believe we hung */
394 ixgbevf_tx_timeout_reset(adapter);
395
396 return true;
397 }
398
399 if (ring_is_xdp(tx_ring))
400 return !!budget;
401
402 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
403 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
404 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
405 /* Make sure that anybody stopping the queue after this
406 * sees the new next_to_clean.
407 */
408 smp_mb();
409
410 if (__netif_subqueue_stopped(tx_ring->netdev,
411 tx_ring->queue_index) &&
412 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
413 netif_wake_subqueue(tx_ring->netdev,
414 tx_ring->queue_index);
415 ++tx_ring->tx_stats.restart_queue;
416 }
417 }
418
419 return !!budget;
420 }
421
422 /**
423 * ixgbevf_rx_skb - Helper function to determine proper Rx method
424 * @q_vector: structure containing interrupt and ring information
425 * @skb: packet to send up
426 **/
427 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
428 struct sk_buff *skb)
429 {
430 napi_gro_receive(&q_vector->napi, skb);
431 }
432
433 #define IXGBE_RSS_L4_TYPES_MASK \
434 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
435 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
436 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
437 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
438
439 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
440 union ixgbe_adv_rx_desc *rx_desc,
441 struct sk_buff *skb)
442 {
443 u16 rss_type;
444
445 if (!(ring->netdev->features & NETIF_F_RXHASH))
446 return;
447
448 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
449 IXGBE_RXDADV_RSSTYPE_MASK;
450
451 if (!rss_type)
452 return;
453
454 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
455 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
456 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
457 }
458
459 /**
460 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
461 * @ring: structure containing ring specific data
462 * @rx_desc: current Rx descriptor being processed
463 * @skb: skb currently being received and modified
464 **/
465 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
466 union ixgbe_adv_rx_desc *rx_desc,
467 struct sk_buff *skb)
468 {
469 skb_checksum_none_assert(skb);
470
471 /* Rx csum disabled */
472 if (!(ring->netdev->features & NETIF_F_RXCSUM))
473 return;
474
475 /* if IP and error */
476 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
477 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
478 ring->rx_stats.csum_err++;
479 return;
480 }
481
482 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
483 return;
484
485 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
486 ring->rx_stats.csum_err++;
487 return;
488 }
489
490 /* It must be a TCP or UDP packet with a valid checksum */
491 skb->ip_summed = CHECKSUM_UNNECESSARY;
492 }
493
494 /**
495 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
496 * @rx_ring: rx descriptor ring packet is being transacted on
497 * @rx_desc: pointer to the EOP Rx descriptor
498 * @skb: pointer to current skb being populated
499 *
500 * This function checks the ring, descriptor, and packet information in
501 * order to populate the checksum, VLAN, protocol, and other fields within
502 * the skb.
503 **/
504 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
505 union ixgbe_adv_rx_desc *rx_desc,
506 struct sk_buff *skb)
507 {
508 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
509 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
510
511 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
512 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
513 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
514
515 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
516 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
517 }
518
519 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
520 }
521
522 static
523 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
524 const unsigned int size)
525 {
526 struct ixgbevf_rx_buffer *rx_buffer;
527
528 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
529 prefetchw(rx_buffer->page);
530
531 /* we are reusing so sync this buffer for CPU use */
532 dma_sync_single_range_for_cpu(rx_ring->dev,
533 rx_buffer->dma,
534 rx_buffer->page_offset,
535 size,
536 DMA_FROM_DEVICE);
537
538 rx_buffer->pagecnt_bias--;
539
540 return rx_buffer;
541 }
542
543 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
544 struct ixgbevf_rx_buffer *rx_buffer,
545 struct sk_buff *skb)
546 {
547 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
548 /* hand second half of page back to the ring */
549 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
550 } else {
551 if (IS_ERR(skb))
552 /* We are not reusing the buffer so unmap it and free
553 * any references we are holding to it
554 */
555 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
556 ixgbevf_rx_pg_size(rx_ring),
557 DMA_FROM_DEVICE,
558 IXGBEVF_RX_DMA_ATTR);
559 __page_frag_cache_drain(rx_buffer->page,
560 rx_buffer->pagecnt_bias);
561 }
562
563 /* clear contents of rx_buffer */
564 rx_buffer->page = NULL;
565 }
566
567 /**
568 * ixgbevf_is_non_eop - process handling of non-EOP buffers
569 * @rx_ring: Rx ring being processed
570 * @rx_desc: Rx descriptor for current buffer
571 *
572 * This function updates next to clean. If the buffer is an EOP buffer
573 * this function exits returning false, otherwise it will place the
574 * sk_buff in the next buffer to be chained and return true indicating
575 * that this is in fact a non-EOP buffer.
576 **/
577 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
578 union ixgbe_adv_rx_desc *rx_desc)
579 {
580 u32 ntc = rx_ring->next_to_clean + 1;
581
582 /* fetch, update, and store next to clean */
583 ntc = (ntc < rx_ring->count) ? ntc : 0;
584 rx_ring->next_to_clean = ntc;
585
586 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
587
588 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
589 return false;
590
591 return true;
592 }
593
594 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
595 {
596 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
597 }
598
599 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
600 struct ixgbevf_rx_buffer *bi)
601 {
602 struct page *page = bi->page;
603 dma_addr_t dma;
604
605 /* since we are recycling buffers we should seldom need to alloc */
606 if (likely(page))
607 return true;
608
609 /* alloc new page for storage */
610 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
611 if (unlikely(!page)) {
612 rx_ring->rx_stats.alloc_rx_page_failed++;
613 return false;
614 }
615
616 /* map page for use */
617 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
618 ixgbevf_rx_pg_size(rx_ring),
619 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
620
621 /* if mapping failed free memory back to system since
622 * there isn't much point in holding memory we can't use
623 */
624 if (dma_mapping_error(rx_ring->dev, dma)) {
625 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
626
627 rx_ring->rx_stats.alloc_rx_page_failed++;
628 return false;
629 }
630
631 bi->dma = dma;
632 bi->page = page;
633 bi->page_offset = ixgbevf_rx_offset(rx_ring);
634 bi->pagecnt_bias = 1;
635 rx_ring->rx_stats.alloc_rx_page++;
636
637 return true;
638 }
639
640 /**
641 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
642 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
643 * @cleaned_count: number of buffers to replace
644 **/
645 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
646 u16 cleaned_count)
647 {
648 union ixgbe_adv_rx_desc *rx_desc;
649 struct ixgbevf_rx_buffer *bi;
650 unsigned int i = rx_ring->next_to_use;
651
652 /* nothing to do or no valid netdev defined */
653 if (!cleaned_count || !rx_ring->netdev)
654 return;
655
656 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
657 bi = &rx_ring->rx_buffer_info[i];
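	/* as in the Tx clean path, keep i as a negative offset from the ring
	 * end so the wrap check below is a simple test against zero
	 */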
658 i -= rx_ring->count;
659
660 do {
661 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
662 break;
663
664 /* sync the buffer for use by the device */
665 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
666 bi->page_offset,
667 ixgbevf_rx_bufsz(rx_ring),
668 DMA_FROM_DEVICE);
669
670 /* Refresh the desc even if pkt_addr didn't change
671 * because each write-back erases this info.
672 */
673 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
674
675 rx_desc++;
676 bi++;
677 i++;
678 if (unlikely(!i)) {
679 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
680 bi = rx_ring->rx_buffer_info;
681 i -= rx_ring->count;
682 }
683
684 /* clear the length for the next_to_use descriptor */
685 rx_desc->wb.upper.length = 0;
686
687 cleaned_count--;
688 } while (cleaned_count);
689
690 i += rx_ring->count;
691
692 if (rx_ring->next_to_use != i) {
693 /* record the next descriptor to use */
694 rx_ring->next_to_use = i;
695
696 /* update next to alloc since we have filled the ring */
697 rx_ring->next_to_alloc = i;
698
699 /* Force memory writes to complete before letting h/w
700 * know there are new descriptors to fetch. (Only
701 * applicable for weak-ordered memory model archs,
702 * such as IA-64).
703 */
704 wmb();
705 ixgbevf_write_tail(rx_ring, i);
706 }
707 }
708
709 /**
710 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
711 * @rx_ring: rx descriptor ring packet is being transacted on
712 * @rx_desc: pointer to the EOP Rx descriptor
713 * @skb: pointer to current skb being fixed
714 *
715 * Check for corrupted packet headers caused by senders on the local L2
716 * embedded NIC switch not setting up their Tx Descriptors right. These
717 * should be very rare.
718 *
719 * Also address the case where we are pulling data in on pages only
720 * and as such no data is present in the skb header.
721 *
722 * In addition if skb is not at least 60 bytes we need to pad it so that
723 * it is large enough to qualify as a valid Ethernet frame.
724 *
725 * Returns true if an error was encountered and skb was freed.
726 **/
727 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
728 union ixgbe_adv_rx_desc *rx_desc,
729 struct sk_buff *skb)
730 {
731 /* XDP packets use error pointer so abort at this point */
732 if (IS_ERR(skb))
733 return true;
734
735 /* verify that the packet does not have any known errors */
736 if (unlikely(ixgbevf_test_staterr(rx_desc,
737 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
738 struct net_device *netdev = rx_ring->netdev;
739
740 if (!(netdev->features & NETIF_F_RXALL)) {
741 dev_kfree_skb_any(skb);
742 return true;
743 }
744 }
745
746 /* if eth_skb_pad returns an error the skb was freed */
747 if (eth_skb_pad(skb))
748 return true;
749
750 return false;
751 }
752
753 /**
754 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
755 * @rx_ring: rx descriptor ring to store buffers on
756 * @old_buff: donor buffer to have page reused
757 *
758 * Synchronizes page for reuse by the adapter
759 **/
760 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
761 struct ixgbevf_rx_buffer *old_buff)
762 {
763 struct ixgbevf_rx_buffer *new_buff;
764 u16 nta = rx_ring->next_to_alloc;
765
766 new_buff = &rx_ring->rx_buffer_info[nta];
767
768 /* update, and store next to alloc */
769 nta++;
770 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
771
772 /* transfer page from old buffer to new buffer */
773 new_buff->page = old_buff->page;
774 new_buff->dma = old_buff->dma;
775 new_buff->page_offset = old_buff->page_offset;
776 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
777 }
778
779 static inline bool ixgbevf_page_is_reserved(struct page *page)
780 {
781 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
782 }
783
784 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
785 {
786 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
787 struct page *page = rx_buffer->page;
788
789 /* avoid re-using remote pages */
790 if (unlikely(ixgbevf_page_is_reserved(page)))
791 return false;
792
793 #if (PAGE_SIZE < 8192)
794 /* if we are only owner of page we can reuse it */
795 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
796 return false;
797 #else
798 #define IXGBEVF_LAST_OFFSET \
799 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
800
801 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
802 return false;
803
804 #endif
805
806 /* If we have drained the page fragment pool we need to update
807 * the pagecnt_bias and page count so that we fully restock the
808 * number of references the driver holds.
809 */
810 if (unlikely(!pagecnt_bias)) {
811 page_ref_add(page, USHRT_MAX);
812 rx_buffer->pagecnt_bias = USHRT_MAX;
813 }
814
815 return true;
816 }
817
818 /**
819 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
820 * @rx_ring: rx descriptor ring to transact packets on
821 * @rx_buffer: buffer containing page to add
822 * @skb: sk_buff to place the data into
823 * @size: size of buffer to be added
824 *
825 * This function will add the data contained in rx_buffer->page to the skb.
826 **/
827 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
828 struct ixgbevf_rx_buffer *rx_buffer,
829 struct sk_buff *skb,
830 unsigned int size)
831 {
832 #if (PAGE_SIZE < 8192)
833 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
834 #else
835 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
836 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
837 SKB_DATA_ALIGN(size);
838 #endif
839 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
840 rx_buffer->page_offset, size, truesize);
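	/* move the offset to the unused half of the page (order-0 pages) or
	 * past the data just consumed (larger pages) so the rest can be reused
	 */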
841 #if (PAGE_SIZE < 8192)
842 rx_buffer->page_offset ^= truesize;
843 #else
844 rx_buffer->page_offset += truesize;
845 #endif
846 }
847
848 static
849 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
850 struct ixgbevf_rx_buffer *rx_buffer,
851 struct xdp_buff *xdp,
852 union ixgbe_adv_rx_desc *rx_desc)
853 {
854 unsigned int size = xdp->data_end - xdp->data;
855 #if (PAGE_SIZE < 8192)
856 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
857 #else
858 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
859 xdp->data_hard_start);
860 #endif
861 unsigned int headlen;
862 struct sk_buff *skb;
863
864 /* prefetch first cache line of first page */
865 prefetch(xdp->data);
866 #if L1_CACHE_BYTES < 128
867 prefetch(xdp->data + L1_CACHE_BYTES);
868 #endif
869 /* Note, we get here by enabling legacy-rx via:
870 *
871 * ethtool --set-priv-flags <dev> legacy-rx on
872 *
873 * In this mode, we currently get 0 extra XDP headroom as
874 * opposed to having legacy-rx off, where we process XDP
875 * packets going to stack via ixgbevf_build_skb().
876 *
877 * For ixgbevf_construct_skb() mode it means that the
878 * xdp->data_meta will always point to xdp->data, since
879 * the helper cannot expand the head. Should this ever
880 * change in the future for legacy-rx mode on, then let's also
881 * add xdp->data_meta handling here.
882 */
883
884 /* allocate a skb to store the frags */
885 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
886 if (unlikely(!skb))
887 return NULL;
888
889 /* Determine available headroom for copy */
890 headlen = size;
891 if (headlen > IXGBEVF_RX_HDR_SIZE)
892 headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
893
894 /* align pull length to size of long to optimize memcpy performance */
895 memcpy(__skb_put(skb, headlen), xdp->data,
896 ALIGN(headlen, sizeof(long)));
897
898 /* update all of the pointers */
899 size -= headlen;
900 if (size) {
901 skb_add_rx_frag(skb, 0, rx_buffer->page,
902 (xdp->data + headlen) -
903 page_address(rx_buffer->page),
904 size, truesize);
905 #if (PAGE_SIZE < 8192)
906 rx_buffer->page_offset ^= truesize;
907 #else
908 rx_buffer->page_offset += truesize;
909 #endif
910 } else {
911 rx_buffer->pagecnt_bias++;
912 }
913
914 return skb;
915 }
916
917 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
918 u32 qmask)
919 {
920 struct ixgbe_hw *hw = &adapter->hw;
921
922 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
923 }
924
925 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
926 struct ixgbevf_rx_buffer *rx_buffer,
927 struct xdp_buff *xdp,
928 union ixgbe_adv_rx_desc *rx_desc)
929 {
930 unsigned int metasize = xdp->data - xdp->data_meta;
931 #if (PAGE_SIZE < 8192)
932 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
933 #else
934 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
935 SKB_DATA_ALIGN(xdp->data_end -
936 xdp->data_hard_start);
937 #endif
938 struct sk_buff *skb;
939
940 /* Prefetch first cache line of first page. If xdp->data_meta
941 * is unused, this points to xdp->data, otherwise, we likely
942 * have a consumer accessing first few bytes of meta data,
943 * and then actual data.
944 */
945 prefetch(xdp->data_meta);
946 #if L1_CACHE_BYTES < 128
947 prefetch(xdp->data_meta + L1_CACHE_BYTES);
948 #endif
949
950 /* build an skb around the page buffer */
951 skb = build_skb(xdp->data_hard_start, truesize);
952 if (unlikely(!skb))
953 return NULL;
954
955 /* update pointers within the skb to store the data */
956 skb_reserve(skb, xdp->data - xdp->data_hard_start);
957 __skb_put(skb, xdp->data_end - xdp->data);
958 if (metasize)
959 skb_metadata_set(skb, metasize);
960
961 /* update buffer offset */
962 #if (PAGE_SIZE < 8192)
963 rx_buffer->page_offset ^= truesize;
964 #else
965 rx_buffer->page_offset += truesize;
966 #endif
967
968 return skb;
969 }
970
971 #define IXGBEVF_XDP_PASS 0
972 #define IXGBEVF_XDP_CONSUMED 1
973 #define IXGBEVF_XDP_TX 2
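/* ixgbevf_run_xdp() hands these verdicts back to the Rx clean loop encoded
 * as ERR_PTR(-verdict): PASS becomes a NULL skb pointer so a real skb is
 * built, while TX/CONSUMED are recovered with IS_ERR()/PTR_ERR().
 */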
974
975 static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
976 struct xdp_buff *xdp)
977 {
978 struct ixgbevf_tx_buffer *tx_buffer;
979 union ixgbe_adv_tx_desc *tx_desc;
980 u32 len, cmd_type;
981 dma_addr_t dma;
982 u16 i;
983
984 len = xdp->data_end - xdp->data;
985
986 if (unlikely(!ixgbevf_desc_unused(ring)))
987 return IXGBEVF_XDP_CONSUMED;
988
989 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
990 if (dma_mapping_error(ring->dev, dma))
991 return IXGBEVF_XDP_CONSUMED;
992
993 /* record the location of the first descriptor for this packet */
994 i = ring->next_to_use;
995 tx_buffer = &ring->tx_buffer_info[i];
996
997 dma_unmap_len_set(tx_buffer, len, len);
998 dma_unmap_addr_set(tx_buffer, dma, dma);
999 tx_buffer->data = xdp->data;
1000 tx_buffer->bytecount = len;
1001 tx_buffer->gso_segs = 1;
1002 tx_buffer->protocol = 0;
1003
1004 /* Populate minimal context descriptor that will provide for the
1005 * fact that we are expected to process Ethernet frames.
1006 */
1007 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
1008 struct ixgbe_adv_tx_context_desc *context_desc;
1009
1010 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1011
1012 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1013 context_desc->vlan_macip_lens =
1014 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1015 context_desc->seqnum_seed = 0;
1016 context_desc->type_tucmd_mlhl =
1017 cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1018 IXGBE_ADVTXD_DTYP_CTXT);
1019 context_desc->mss_l4len_idx = 0;
1020
1021 i = 1;
1022 }
1023
1024 /* put descriptor type bits */
1025 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1026 IXGBE_ADVTXD_DCMD_DEXT |
1027 IXGBE_ADVTXD_DCMD_IFCS;
1028 cmd_type |= len | IXGBE_TXD_CMD;
1029
1030 tx_desc = IXGBEVF_TX_DESC(ring, i);
1031 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1032
1033 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1034 tx_desc->read.olinfo_status =
1035 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1036 IXGBE_ADVTXD_CC);
1037
1038 /* Avoid any potential race with cleanup */
1039 smp_wmb();
1040
1041 /* set next_to_watch value indicating a packet is present */
1042 i++;
1043 if (i == ring->count)
1044 i = 0;
1045
1046 tx_buffer->next_to_watch = tx_desc;
1047 ring->next_to_use = i;
1048
1049 return IXGBEVF_XDP_TX;
1050 }
1051
1052 static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1053 struct ixgbevf_ring *rx_ring,
1054 struct xdp_buff *xdp)
1055 {
1056 int result = IXGBEVF_XDP_PASS;
1057 struct ixgbevf_ring *xdp_ring;
1058 struct bpf_prog *xdp_prog;
1059 u32 act;
1060
1061 rcu_read_lock();
1062 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1063
1064 if (!xdp_prog)
1065 goto xdp_out;
1066
1067 act = bpf_prog_run_xdp(xdp_prog, xdp);
1068 switch (act) {
1069 case XDP_PASS:
1070 break;
1071 case XDP_TX:
1072 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1073 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1074 break;
1075 default:
1076 bpf_warn_invalid_xdp_action(act);
1077 /* fallthrough */
1078 case XDP_ABORTED:
1079 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1080 /* fallthrough -- handle aborts by dropping packet */
1081 case XDP_DROP:
1082 result = IXGBEVF_XDP_CONSUMED;
1083 break;
1084 }
1085 xdp_out:
1086 rcu_read_unlock();
1087 return ERR_PTR(-result);
1088 }
1089
1090 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1091 struct ixgbevf_rx_buffer *rx_buffer,
1092 unsigned int size)
1093 {
1094 #if (PAGE_SIZE < 8192)
1095 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1096
1097 rx_buffer->page_offset ^= truesize;
1098 #else
1099 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1100 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1101 SKB_DATA_ALIGN(size);
1102
1103 rx_buffer->page_offset += truesize;
1104 #endif
1105 }
1106
1107 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1108 struct ixgbevf_ring *rx_ring,
1109 int budget)
1110 {
1111 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1112 struct ixgbevf_adapter *adapter = q_vector->adapter;
1113 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
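	/* an skb left over from the previous poll holds a frame still being
	 * assembled from multiple (non-EOP) buffers
	 */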
1114 struct sk_buff *skb = rx_ring->skb;
1115 bool xdp_xmit = false;
1116 struct xdp_buff xdp;
1117
1118 xdp.rxq = &rx_ring->xdp_rxq;
1119
1120 while (likely(total_rx_packets < budget)) {
1121 struct ixgbevf_rx_buffer *rx_buffer;
1122 union ixgbe_adv_rx_desc *rx_desc;
1123 unsigned int size;
1124
1125 /* return some buffers to hardware, one at a time is too slow */
1126 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1127 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1128 cleaned_count = 0;
1129 }
1130
1131 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1132 size = le16_to_cpu(rx_desc->wb.upper.length);
1133 if (!size)
1134 break;
1135
1136 /* This memory barrier is needed to keep us from reading
1137 * any other fields out of the rx_desc until we know the
1138 * RXD_STAT_DD bit is set
1139 */
1140 rmb();
1141
1142 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1143
1144 /* retrieve a buffer from the ring */
1145 if (!skb) {
1146 xdp.data = page_address(rx_buffer->page) +
1147 rx_buffer->page_offset;
1148 xdp.data_meta = xdp.data;
1149 xdp.data_hard_start = xdp.data -
1150 ixgbevf_rx_offset(rx_ring);
1151 xdp.data_end = xdp.data + size;
1152
1153 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1154 }
1155
1156 if (IS_ERR(skb)) {
1157 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1158 xdp_xmit = true;
1159 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1160 size);
1161 } else {
1162 rx_buffer->pagecnt_bias++;
1163 }
1164 total_rx_packets++;
1165 total_rx_bytes += size;
1166 } else if (skb) {
1167 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1168 } else if (ring_uses_build_skb(rx_ring)) {
1169 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1170 &xdp, rx_desc);
1171 } else {
1172 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1173 &xdp, rx_desc);
1174 }
1175
1176 /* exit if we failed to retrieve a buffer */
1177 if (!skb) {
1178 rx_ring->rx_stats.alloc_rx_buff_failed++;
1179 rx_buffer->pagecnt_bias++;
1180 break;
1181 }
1182
1183 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1184 cleaned_count++;
1185
1186 /* fetch next buffer in frame if non-eop */
1187 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1188 continue;
1189
1190 /* verify the packet layout is correct */
1191 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1192 skb = NULL;
1193 continue;
1194 }
1195
1196 /* probably a little skewed due to removing CRC */
1197 total_rx_bytes += skb->len;
1198
1199 /* Workaround hardware that can't do proper VEPA multicast
1200 * source pruning.
1201 */
1202 if ((skb->pkt_type == PACKET_BROADCAST ||
1203 skb->pkt_type == PACKET_MULTICAST) &&
1204 ether_addr_equal(rx_ring->netdev->dev_addr,
1205 eth_hdr(skb)->h_source)) {
1206 dev_kfree_skb_irq(skb);
1207 continue;
1208 }
1209
1210 /* populate checksum, VLAN, and protocol */
1211 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1212
1213 ixgbevf_rx_skb(q_vector, skb);
1214
1215 /* reset skb pointer */
1216 skb = NULL;
1217
1218 /* update budget accounting */
1219 total_rx_packets++;
1220 }
1221
1222 /* place incomplete frames back on ring for completion */
1223 rx_ring->skb = skb;
1224
1225 if (xdp_xmit) {
1226 struct ixgbevf_ring *xdp_ring =
1227 adapter->xdp_ring[rx_ring->queue_index];
1228
1229 /* Force memory writes to complete before letting h/w
1230 * know there are new descriptors to fetch.
1231 */
1232 wmb();
1233 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1234 }
1235
1236 u64_stats_update_begin(&rx_ring->syncp);
1237 rx_ring->stats.packets += total_rx_packets;
1238 rx_ring->stats.bytes += total_rx_bytes;
1239 u64_stats_update_end(&rx_ring->syncp);
1240 q_vector->rx.total_packets += total_rx_packets;
1241 q_vector->rx.total_bytes += total_rx_bytes;
1242
1243 return total_rx_packets;
1244 }
1245
1246 /**
1247 * ixgbevf_poll - NAPI polling callback
1248 * @napi: napi struct with our devices info in it
1249 * @budget: amount of work driver is allowed to do this pass, in packets
1250 *
1251 * This function will clean one or more rings associated with a
1252 * q_vector.
1253 **/
1254 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1255 {
1256 struct ixgbevf_q_vector *q_vector =
1257 container_of(napi, struct ixgbevf_q_vector, napi);
1258 struct ixgbevf_adapter *adapter = q_vector->adapter;
1259 struct ixgbevf_ring *ring;
1260 int per_ring_budget, work_done = 0;
1261 bool clean_complete = true;
1262
1263 ixgbevf_for_each_ring(ring, q_vector->tx) {
1264 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1265 clean_complete = false;
1266 }
1267
1268 if (budget <= 0)
1269 return budget;
1270
1271 /* attempt to distribute budget to each queue fairly, but don't allow
1272 * the budget to go below 1 because we'll exit polling
1273 */
1274 if (q_vector->rx.count > 1)
1275 per_ring_budget = max(budget/q_vector->rx.count, 1);
1276 else
1277 per_ring_budget = budget;
1278
1279 ixgbevf_for_each_ring(ring, q_vector->rx) {
1280 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1281 per_ring_budget);
1282 work_done += cleaned;
1283 if (cleaned >= per_ring_budget)
1284 clean_complete = false;
1285 }
1286
1287 /* If all work not completed, return budget and keep polling */
1288 if (!clean_complete)
1289 return budget;
1290 /* all work done, exit the polling mode */
1291 napi_complete_done(napi, work_done);
1292 if (adapter->rx_itr_setting == 1)
1293 ixgbevf_set_itr(q_vector);
1294 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1295 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1296 ixgbevf_irq_enable_queues(adapter,
1297 BIT(q_vector->v_idx));
1298
1299 return 0;
1300 }
1301
1302 /**
1303 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1304 * @q_vector: structure containing interrupt and ring information
1305 **/
1306 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1307 {
1308 struct ixgbevf_adapter *adapter = q_vector->adapter;
1309 struct ixgbe_hw *hw = &adapter->hw;
1310 int v_idx = q_vector->v_idx;
1311 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1312
1313 /* set the WDIS bit to not clear the timer bits and cause an
1314 * immediate assertion of the interrupt
1315 */
1316 itr_reg |= IXGBE_EITR_CNT_WDIS;
1317
1318 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1319 }
1320
1321 /**
1322 * ixgbevf_configure_msix - Configure MSI-X hardware
1323 * @adapter: board private structure
1324 *
1325 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1326 * interrupts.
1327 **/
1328 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1329 {
1330 struct ixgbevf_q_vector *q_vector;
1331 int q_vectors, v_idx;
1332
1333 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1334 adapter->eims_enable_mask = 0;
1335
1336 /* Populate the IVAR table and set the ITR values to the
1337 * corresponding register.
1338 */
1339 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1340 struct ixgbevf_ring *ring;
1341
1342 q_vector = adapter->q_vector[v_idx];
1343
1344 ixgbevf_for_each_ring(ring, q_vector->rx)
1345 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1346
1347 ixgbevf_for_each_ring(ring, q_vector->tx)
1348 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1349
1350 if (q_vector->tx.ring && !q_vector->rx.ring) {
1351 /* Tx only vector */
1352 if (adapter->tx_itr_setting == 1)
1353 q_vector->itr = IXGBE_12K_ITR;
1354 else
1355 q_vector->itr = adapter->tx_itr_setting;
1356 } else {
1357 /* Rx or Rx/Tx vector */
1358 if (adapter->rx_itr_setting == 1)
1359 q_vector->itr = IXGBE_20K_ITR;
1360 else
1361 q_vector->itr = adapter->rx_itr_setting;
1362 }
1363
1364 /* add q_vector eims value to global eims_enable_mask */
1365 adapter->eims_enable_mask |= BIT(v_idx);
1366
1367 ixgbevf_write_eitr(q_vector);
1368 }
1369
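	/* the vector following the last queue vector is used for the "other"
	 * (mailbox) cause
	 */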
1370 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1371 /* setup eims_other and add value to global eims_enable_mask */
1372 adapter->eims_other = BIT(v_idx);
1373 adapter->eims_enable_mask |= adapter->eims_other;
1374 }
1375
1376 enum latency_range {
1377 lowest_latency = 0,
1378 low_latency = 1,
1379 bulk_latency = 2,
1380 latency_invalid = 255
1381 };
1382
1383 /**
1384 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1385 * @q_vector: structure containing interrupt and ring information
1386 * @ring_container: structure containing ring performance data
1387 *
1388 * Stores a new ITR value based on packets and byte
1389 * counts during the last interrupt. The advantage of per interrupt
1390 * computation is faster updates and more accurate ITR for the current
1391 * traffic pattern. Constants in this function were computed
1392 * based on theoretical maximum wire speed and thresholds were set based
1393 * on testing data as well as attempting to minimize response time
1394 * while increasing bulk throughput.
1395 **/
1396 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1397 struct ixgbevf_ring_container *ring_container)
1398 {
1399 int bytes = ring_container->total_bytes;
1400 int packets = ring_container->total_packets;
1401 u32 timepassed_us;
1402 u64 bytes_perint;
1403 u8 itr_setting = ring_container->itr;
1404
1405 if (packets == 0)
1406 return;
1407
1408 /* simple throttle rate management
1409 * 0-20MB/s lowest (100000 ints/s)
1410 * 20-100MB/s low (20000 ints/s)
1411 * 100-1249MB/s bulk (12000 ints/s)
1412 */
1413 /* what was last interrupt timeslice? */
1414 timepassed_us = q_vector->itr >> 2;
1415 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1416
1417 switch (itr_setting) {
1418 case lowest_latency:
1419 if (bytes_perint > 10)
1420 itr_setting = low_latency;
1421 break;
1422 case low_latency:
1423 if (bytes_perint > 20)
1424 itr_setting = bulk_latency;
1425 else if (bytes_perint <= 10)
1426 itr_setting = lowest_latency;
1427 break;
1428 case bulk_latency:
1429 if (bytes_perint <= 20)
1430 itr_setting = low_latency;
1431 break;
1432 }
1433
1434 /* clear work counters since we have the values we need */
1435 ring_container->total_bytes = 0;
1436 ring_container->total_packets = 0;
1437
1438 /* write updated itr to ring container */
1439 ring_container->itr = itr_setting;
1440 }
1441
1442 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1443 {
1444 u32 new_itr = q_vector->itr;
1445 u8 current_itr;
1446
1447 ixgbevf_update_itr(q_vector, &q_vector->tx);
1448 ixgbevf_update_itr(q_vector, &q_vector->rx);
1449
1450 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1451
1452 switch (current_itr) {
1453 /* counts and packets in update_itr are dependent on these numbers */
1454 case lowest_latency:
1455 new_itr = IXGBE_100K_ITR;
1456 break;
1457 case low_latency:
1458 new_itr = IXGBE_20K_ITR;
1459 break;
1460 case bulk_latency:
1461 new_itr = IXGBE_12K_ITR;
1462 break;
1463 default:
1464 break;
1465 }
1466
1467 if (new_itr != q_vector->itr) {
1468 /* do an exponential smoothing */
1469 new_itr = (10 * new_itr * q_vector->itr) /
1470 ((9 * new_itr) + q_vector->itr);
1471
1472 /* save the algorithm value here */
1473 q_vector->itr = new_itr;
1474
1475 ixgbevf_write_eitr(q_vector);
1476 }
1477 }
1478
1479 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1480 {
1481 struct ixgbevf_adapter *adapter = data;
1482 struct ixgbe_hw *hw = &adapter->hw;
1483
1484 hw->mac.get_link_status = 1;
1485
1486 ixgbevf_service_event_schedule(adapter);
1487
1488 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1489
1490 return IRQ_HANDLED;
1491 }
1492
1493 /**
1494 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1495 * @irq: unused
1496 * @data: pointer to our q_vector struct for this interrupt vector
1497 **/
1498 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1499 {
1500 struct ixgbevf_q_vector *q_vector = data;
1501
1502 /* EIAM disabled interrupts (on this vector) for us */
1503 if (q_vector->rx.ring || q_vector->tx.ring)
1504 napi_schedule_irqoff(&q_vector->napi);
1505
1506 return IRQ_HANDLED;
1507 }
1508
1509 /**
1510 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1511 * @adapter: board private structure
1512 *
1513 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1514 * interrupts from the kernel.
1515 **/
1516 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1517 {
1518 struct net_device *netdev = adapter->netdev;
1519 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1520 unsigned int ri = 0, ti = 0;
1521 int vector, err;
1522
1523 for (vector = 0; vector < q_vectors; vector++) {
1524 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1525 struct msix_entry *entry = &adapter->msix_entries[vector];
1526
1527 if (q_vector->tx.ring && q_vector->rx.ring) {
1528 snprintf(q_vector->name, sizeof(q_vector->name),
1529 "%s-TxRx-%u", netdev->name, ri++);
1530 ti++;
1531 } else if (q_vector->rx.ring) {
1532 snprintf(q_vector->name, sizeof(q_vector->name),
1533 "%s-rx-%u", netdev->name, ri++);
1534 } else if (q_vector->tx.ring) {
1535 snprintf(q_vector->name, sizeof(q_vector->name),
1536 "%s-tx-%u", netdev->name, ti++);
1537 } else {
1538 /* skip this unused q_vector */
1539 continue;
1540 }
1541 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1542 q_vector->name, q_vector);
1543 if (err) {
1544 hw_dbg(&adapter->hw,
1545 "request_irq failed for MSIX interrupt Error: %d\n",
1546 err);
1547 goto free_queue_irqs;
1548 }
1549 }
1550
1551 err = request_irq(adapter->msix_entries[vector].vector,
1552 &ixgbevf_msix_other, 0, netdev->name, adapter);
1553 if (err) {
1554 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1555 err);
1556 goto free_queue_irqs;
1557 }
1558
1559 return 0;
1560
1561 free_queue_irqs:
1562 while (vector) {
1563 vector--;
1564 free_irq(adapter->msix_entries[vector].vector,
1565 adapter->q_vector[vector]);
1566 }
1567 /* This failure is non-recoverable - it indicates the system is
1568 * out of MSIX vector resources and the VF driver cannot run
1569 * without them. Set the number of msix vectors to zero
1570 * indicating that not enough can be allocated. The error
1571 * will be returned to the user indicating device open failed.
1572 * Any further attempts to force the driver to open will also
1573 * fail. The only way to recover is to unload the driver and
1574 * reload it again. If the system has recovered some MSIX
1575 * vectors then it may succeed.
1576 */
1577 adapter->num_msix_vectors = 0;
1578 return err;
1579 }
1580
1581 /**
1582 * ixgbevf_request_irq - initialize interrupts
1583 * @adapter: board private structure
1584 *
1585 * Attempts to configure interrupts using the best available
1586 * capabilities of the hardware and kernel.
1587 **/
1588 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1589 {
1590 int err = ixgbevf_request_msix_irqs(adapter);
1591
1592 if (err)
1593 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1594
1595 return err;
1596 }
1597
1598 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1599 {
1600 int i, q_vectors;
1601
1602 if (!adapter->msix_entries)
1603 return;
1604
1605 q_vectors = adapter->num_msix_vectors;
1606 i = q_vectors - 1;
1607
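	/* the last vector was requested for the mailbox/other cause with the
	 * adapter itself as the cookie, so free it separately
	 */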
1608 free_irq(adapter->msix_entries[i].vector, adapter);
1609 i--;
1610
1611 for (; i >= 0; i--) {
1612 /* free only the irqs that were actually requested */
1613 if (!adapter->q_vector[i]->rx.ring &&
1614 !adapter->q_vector[i]->tx.ring)
1615 continue;
1616
1617 free_irq(adapter->msix_entries[i].vector,
1618 adapter->q_vector[i]);
1619 }
1620 }
1621
1622 /**
1623 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1624 * @adapter: board private structure
1625 **/
1626 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1627 {
1628 struct ixgbe_hw *hw = &adapter->hw;
1629 int i;
1630
1631 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1632 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1633 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1634
1635 IXGBE_WRITE_FLUSH(hw);
1636
1637 for (i = 0; i < adapter->num_msix_vectors; i++)
1638 synchronize_irq(adapter->msix_entries[i].vector);
1639 }
1640
1641 /**
1642 * ixgbevf_irq_enable - Enable default interrupt generation settings
1643 * @adapter: board private structure
1644 **/
1645 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1646 {
1647 struct ixgbe_hw *hw = &adapter->hw;
1648
1649 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1650 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1651 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1652 }
1653
1654 /**
1655 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1656 * @adapter: board private structure
1657 * @ring: structure containing ring specific data
1658 *
1659 * Configure the Tx descriptor ring after a reset.
1660 **/
1661 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1662 struct ixgbevf_ring *ring)
1663 {
1664 struct ixgbe_hw *hw = &adapter->hw;
1665 u64 tdba = ring->dma;
1666 int wait_loop = 10;
1667 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1668 u8 reg_idx = ring->reg_idx;
1669
1670 /* disable queue to avoid issues while updating state */
1671 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1672 IXGBE_WRITE_FLUSH(hw);
1673
1674 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1675 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1676 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1677 ring->count * sizeof(union ixgbe_adv_tx_desc));
1678
1679 /* disable head writeback */
1680 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1681 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1682
1683 /* enable relaxed ordering */
1684 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1685 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1686 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1687
1688 /* reset head and tail pointers */
1689 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1690 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1691 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1692
1693 /* reset ntu and ntc to place SW in sync with hardware */
1694 ring->next_to_clean = 0;
1695 ring->next_to_use = 0;
1696
1697 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
1698 * to or less than the number of on chip descriptors, which is
1699 * currently 40.
1700 */
1701 txdctl |= (8 << 16); /* WTHRESH = 8 */
1702
1703 /* Setting PTHRESH to 32 improves performance */
1704 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1705 32; /* PTHRESH = 32 */

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct ixgbevf_tx_buffer) * ring->count);

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
	clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

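	/* Bits 31:29 of PSRTYPE encode log2 of the RSS queue count; setting
	 * BIT(29) therefore asks the hardware to spread receives across two
	 * queues, the most a VF supports.
	 */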
	if (adapter->num_rx_queues > 1)
		psrtype |= BIT(29);

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
{
	u32 *rss_key;

	if (!adapter->rss_key) {
		rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
		if (unlikely(!rss_key))
			return -ENOMEM;

		netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
		adapter->rss_key = rss_key;
	}

	return 0;
}

static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfmrqc = 0, vfreta = 0;
	u16 rss_i = adapter->num_rx_queues;
	u8 i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));

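	/* Each 32-bit VFRETA register holds four 8-bit redirection entries:
	 * entry i is shifted into byte (i % 4) of the accumulator, and the
	 * accumulated word is flushed to VFRETA(i / 4) every fourth pass.
	 */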
	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;

	vfmrqc |= IXGBE_VFMRQC_RSSEN;

	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

#ifndef CONFIG_SPARC
	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
#else
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN |
			IXGBE_DCA_RXCTRL_DATA_WRO_EN);
#endif

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct ixgbevf_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IXGBEVF_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, ring, reg_idx);

	/* RXDCTL.RLPML does not work on 82599 */
	if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
			    IXGBE_RXDCTL_RLPML_EN);

#if (PAGE_SIZE < 8192)
		/* Limit the maximum frame size so we don't overrun the skb */
		if (ring_uses_build_skb(ring) &&
		    !ring_uses_large_buffer(ring))
			rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
				  IXGBE_RXDCTL_RLPML_EN;
#endif
	}

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *rx_ring)
{
	struct net_device *netdev = adapter->netdev;
	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set build_skb and buffer size flags */
	clear_ring_build_skb_enabled(rx_ring);
	clear_ring_uses_large_buffer(rx_ring);

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		return;

	set_ring_build_skb_enabled(rx_ring);

	if (PAGE_SIZE < 8192) {
		if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
			return;

		set_ring_uses_large_buffer(rx_ring);
	}
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int i, ret;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		dev_err(&adapter->pdev->dev,
			"Failed to set MTU at %d\n", netdev->mtu);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];

		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
		ixgbevf_configure_rx_ring(adapter, rx_ring);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
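			/* brief pause between mailbox messages so the PF
			 * has time to service each MAC filter request
			 */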
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int flags = netdev->flags;
	int xcast_mode;

	/* request the most inclusive mode we need */
	if (flags & IFF_PROMISC)
		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
	else if (flags & IFF_ALLMULTI)
		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
		xcast_mode = IXGBEVF_XCAST_MODE_NONE;

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.update_xcast_mode(hw, xcast_mode);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
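	/* mailbox API versions to try, newest first; the first one the PF
	 * accepts is used
	 */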
	int api[] = { ixgbe_mbox_api_13,
		      ixgbe_mbox_api_12,
		      ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

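	/* the barrier pairs with clear_bit() so all preceding configuration
	 * is visible before the DOWN flag is cleared
	 */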
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring pages */
	while (i != rx_ring->next_to_alloc) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbevf_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     rx_buffer->dma,
				     ixgbevf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBEVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->service_timer);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		u8 reg_idx = adapter->xdp_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->last_reset = jiffies;
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);

		switch (hw->api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
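			/* With an XDP program loaded, each queue pair also
			 * needs its own XDP Tx ring. If the PF granted no
			 * more Tx queues than RSS queues, shrink the RSS
			 * count so regular Tx plus XDP rings still fit in
			 * max_tx_queues.
			 */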
			if (adapter->xdp_prog &&
			    hw->mac.max_tx_queues == rss)
				rss = rss > 3 ? 2 : 1;

			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
			adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
			break;
		default:
			break;
		}
	}
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	/* A failure in MSI-X entry allocation isn't fatal, but the VF driver
	 * does not support any other modes, so we will simply fail here. Note
	 * that we clean up the msix_entries pointer else-where.
	 */
	return ixgbevf_acquire_msix_vectors(adapter, v_budget);
}

static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
			     struct ixgbevf_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 * @txr_count: number of Tx rings for q vector
 * @txr_idx: index of first Tx ring to assign
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: number of Rx rings for q vector
 * @rxr_idx: index of first Rx ring to assign
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
				  int txr_count, int txr_idx,
				  int xdp_count, int xdp_idx,
				  int rxr_count, int rxr_idx)
{
	struct ixgbevf_q_vector *q_vector;
	int reg_idx = txr_idx + xdp_idx;
	struct ixgbevf_ring *ring;
	int ring_count, size;

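	/* The q_vector and all of its rings come from one allocation: the
	 * ring array sits directly behind the q_vector structure, so the
	 * loops below can walk q_vector->ring with plain pointer increments.
	 */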
	ring_count = txr_count + xdp_count + rxr_count;
	size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbevf_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;
		ring->reg_idx = reg_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx++;
		reg_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbevf_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		ring->reg_idx = reg_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;
		reg_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbevf_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;
		ring->reg_idx = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
{
	struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbevf_ring *ring;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbevf_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/* ixgbevf_get_stats() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

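	/* If there are at least as many vectors as rings, give each Rx ring
	 * a dedicated vector first; the loop below then spreads the Tx, XDP
	 * and any remaining Rx rings over the vectors that are left.
	 */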
	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++, q_vectors--) {
			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);

			err = ixgbevf_alloc_q_vector(adapter, v_idx,
						     0, 0, 0, 0, rqpv, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining -= rqpv;
			rxr_idx += rqpv;
		}
	}

	for (; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);

		err = ixgbevf_alloc_q_vector(adapter, v_idx,
					     tqpv, txr_idx,
					     xqpv, xdp_idx,
					     rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		rxr_idx += rqpv;
		txr_remaining -= tqpv;
		txr_idx += tqpv;
		xdp_remaining -= xqpv;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		ixgbevf_free_q_vector(adapter, v_idx);
	}

	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	while (q_vectors) {
		q_vectors--;
		ixgbevf_free_q_vector(adapter, q_vectors);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues,
	       adapter->num_xdp_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	if (hw->mac.type >= ixgbe_mac_X550_vf) {
		err = ixgbevf_init_rss_key(adapter);
		if (err)
			goto out;
	}

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_hw failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(hw->mac.addr, netdev->dev_addr);
		ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
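
/* The VF statistics registers are free-running 32-bit (or 36-bit) counters
 * that wrap. Each macro detects a wrap (current < last), credits the carry
 * into the 64-bit software counter, and then splices the fresh hardware
 * value into its low bits.
 */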
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];

		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
	}

	adapter->hw_csum_rx_error = hw_csum_rx_error;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	adapter->alloc_rx_page = alloc_rx_page;
}

/**
 * ixgbevf_service_timer - Timer Call-back
 * @t: pointer to timer_list struct
 **/
static void ixgbevf_service_timer(struct timer_list *t)
{
	struct ixgbevf_adapter *adapter = from_timer(adapter, t,
						     service_timer);

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
		return;

	rtnl_lock();
	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
		for (i = 0; i < adapter->num_xdp_queues; i++)
			set_check_for_tx_hang(adapter->xdp_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= BIT(i);
	}

	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}

/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}

/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}

/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}

/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		if (adapter->xdp_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, j = 0, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	for (j = 0; j < adapter->num_xdp_queues; j++) {
		err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (j--)
		ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
	while (i--)
		ixgbevf_free_tx_resources(adapter->tx_ring[i]);

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	/* XDP RX-queue info */
	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
			     rx_ring->queue_index) < 0)
		goto err;

	rx_ring->xdp_prog = adapter->xdp_prog;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
3483  * caller's duty to clean those orphaned rings.
3484 *
3485 * Return 0 on success, negative on failure
3486 **/
3487 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3488 {
3489 int i, err = 0;
3490
3491 for (i = 0; i < adapter->num_rx_queues; i++) {
3492 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3493 if (!err)
3494 continue;
3495 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3496 goto err_setup_rx;
3497 }
3498
3499 return 0;
3500 err_setup_rx:
3501 /* rewind the index freeing the rings as we go */
3502 while (i--)
3503 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3504 return err;
3505 }
3506
3507 /**
3508 * ixgbevf_free_rx_resources - Free Rx Resources
3509 * @rx_ring: ring to clean the resources from
3510 *
3511 * Free all receive software resources
3512 **/
3513 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3514 {
3515 ixgbevf_clean_rx_ring(rx_ring);
3516
3517 rx_ring->xdp_prog = NULL;
3518 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3519 vfree(rx_ring->rx_buffer_info);
3520 rx_ring->rx_buffer_info = NULL;
3521
3522 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3523 rx_ring->dma);
3524
3525 rx_ring->desc = NULL;
3526 }
3527
3528 /**
3529 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3530 * @adapter: board private structure
3531 *
3532 * Free all receive software resources
3533 **/
3534 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3535 {
3536 int i;
3537
3538 for (i = 0; i < adapter->num_rx_queues; i++)
3539 if (adapter->rx_ring[i]->desc)
3540 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3541 }
3542
3543 /**
3544 * ixgbevf_open - Called when a network interface is made active
3545 * @netdev: network interface device structure
3546 *
3547 * Returns 0 on success, negative value on failure
3548 *
3549 * The open entry point is called when a network interface is made
3550 * active by the system (IFF_UP). At this point all resources needed
3551 * for transmit and receive operations are allocated, the interrupt
3552 * handler is registered with the OS, the watchdog timer is started,
3553 * and the stack is notified that the interface is ready.
3554 **/
3555 int ixgbevf_open(struct net_device *netdev)
3556 {
3557 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3558 struct ixgbe_hw *hw = &adapter->hw;
3559 int err;
3560
3561 /* A previous failure to open the device because of a lack of
3562 * available MSIX vector resources may have reset the number
3563 * of msix vectors variable to zero. The only way to recover
3564 * is to unload/reload the driver and hope that the system has
3565 * been able to recover some MSIX vector resources.
3566 */
3567 if (!adapter->num_msix_vectors)
3568 return -ENOMEM;
3569
3570 if (hw->adapter_stopped) {
3571 ixgbevf_reset(adapter);
3572 /* if adapter is still stopped then PF isn't up and
3573 * the VF can't start.
3574 */
3575 if (hw->adapter_stopped) {
3576 err = IXGBE_ERR_MBX;
3577 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3578 goto err_setup_reset;
3579 }
3580 }
3581
3582 /* disallow open during test */
3583 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3584 return -EBUSY;
3585
3586 netif_carrier_off(netdev);
3587
3588 /* allocate transmit descriptors */
3589 err = ixgbevf_setup_all_tx_resources(adapter);
3590 if (err)
3591 goto err_setup_tx;
3592
3593 /* allocate receive descriptors */
3594 err = ixgbevf_setup_all_rx_resources(adapter);
3595 if (err)
3596 goto err_setup_rx;
3597
3598 ixgbevf_configure(adapter);
3599
3600 err = ixgbevf_request_irq(adapter);
3601 if (err)
3602 goto err_req_irq;
3603
3604 /* Notify the stack of the actual queue counts. */
3605 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3606 if (err)
3607 goto err_set_queues;
3608
3609 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3610 if (err)
3611 goto err_set_queues;
3612
3613 ixgbevf_up_complete(adapter);
3614
3615 return 0;
3616
3617 err_set_queues:
3618 ixgbevf_free_irq(adapter);
3619 err_req_irq:
3620 ixgbevf_free_all_rx_resources(adapter);
3621 err_setup_rx:
3622 ixgbevf_free_all_tx_resources(adapter);
3623 err_setup_tx:
3624 ixgbevf_reset(adapter);
3625 err_setup_reset:
3626
3627 return err;
3628 }
3629
3630 /**
3631 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3632 * @adapter: the private adapter struct
3633 *
3634 * This function should contain the necessary work common to both suspending
3635 * and closing of the device.
3636 */
3637 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3638 {
3639 ixgbevf_down(adapter);
3640 ixgbevf_free_irq(adapter);
3641 ixgbevf_free_all_tx_resources(adapter);
3642 ixgbevf_free_all_rx_resources(adapter);
3643 }
3644
3645 /**
3646 * ixgbevf_close - Disables a network interface
3647 * @netdev: network interface device structure
3648 *
3649 * Returns 0, this is not allowed to fail
3650 *
3651 * The close entry point is called when an interface is de-activated
3652 * by the OS. The hardware is still under the drivers control, but
3653 * needs to be disabled. A global MAC reset is issued to stop the
3654 * hardware, and all transmit and receive resources are freed.
3655 **/
3656 int ixgbevf_close(struct net_device *netdev)
3657 {
3658 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3659
3660 if (netif_device_present(netdev))
3661 ixgbevf_close_suspend(adapter);
3662
3663 return 0;
3664 }
3665
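/**
 * ixgbevf_queue_reset_subtask - handle a deferred queue reset request
 * @adapter: board private structure
 *
 * Acts on __IXGBEVF_QUEUE_RESET_REQUESTED by closing the interface (if
 * running), rebuilding the interrupt/queue scheme, and reopening it, all
 * under the RTNL lock. Skipped while the interface is down or resetting.
 **/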
3666 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3667 {
3668 struct net_device *dev = adapter->netdev;
3669
3670 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3671 &adapter->state))
3672 return;
3673
3674 /* if interface is down do nothing */
3675 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3676 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3677 return;
3678
3679 /* Hardware has to reinitialize queues and interrupts to
3680 * match packet buffer alignment. Unfortunately, the
3681 * hardware is not flexible enough to do this dynamically.
3682 */
3683 rtnl_lock();
3684
3685 if (netif_running(dev))
3686 ixgbevf_close(dev);
3687
3688 ixgbevf_clear_interrupt_scheme(adapter);
3689 ixgbevf_init_interrupt_scheme(adapter);
3690
3691 if (netif_running(dev))
3692 ixgbevf_open(dev);
3693
3694 rtnl_unlock();
3695 }
3696
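/**
 * ixgbevf_tx_ctxtdesc - write an advanced Tx context descriptor
 * @tx_ring: Tx descriptor ring to place the context descriptor on
 * @vlan_macip_lens: VLAN tag plus MAC and IP header length fields
 * @type_tucmd: descriptor type and L4 command bits
 * @mss_l4len_idx: MSS, L4 header length and context index
 *
 * Fills the next descriptor slot with a context descriptor and advances
 * next_to_use; the data descriptors that follow reference this context.
 **/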
3697 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3698 u32 vlan_macip_lens, u32 type_tucmd,
3699 u32 mss_l4len_idx)
3700 {
3701 struct ixgbe_adv_tx_context_desc *context_desc;
3702 u16 i = tx_ring->next_to_use;
3703
3704 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3705
3706 i++;
3707 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3708
3709 /* set bits to identify this as an advanced context descriptor */
3710 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3711
3712 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3713 context_desc->seqnum_seed = 0;
3714 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3715 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3716 }
3717
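/**
 * ixgbevf_tso - prepare a TSO context descriptor for a GSO skb
 * @tx_ring: Tx descriptor ring
 * @first: first tx_buffer for this packet
 * @hdr_len: filled in with the total segmentation header length
 *
 * Returns 1 if a TSO context descriptor was written, 0 if TSO is not
 * needed, or a negative error from skb_cow_head().
 **/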
3718 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3719 struct ixgbevf_tx_buffer *first,
3720 u8 *hdr_len)
3721 {
3722 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3723 struct sk_buff *skb = first->skb;
3724 union {
3725 struct iphdr *v4;
3726 struct ipv6hdr *v6;
3727 unsigned char *hdr;
3728 } ip;
3729 union {
3730 struct tcphdr *tcp;
3731 unsigned char *hdr;
3732 } l4;
3733 u32 paylen, l4_offset;
3734 int err;
3735
3736 if (skb->ip_summed != CHECKSUM_PARTIAL)
3737 return 0;
3738
3739 if (!skb_is_gso(skb))
3740 return 0;
3741
3742 err = skb_cow_head(skb, 0);
3743 if (err < 0)
3744 return err;
3745
3746 if (eth_p_mpls(first->protocol))
3747 ip.hdr = skb_inner_network_header(skb);
3748 else
3749 ip.hdr = skb_network_header(skb);
3750 l4.hdr = skb_checksum_start(skb);
3751
3752 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3753 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3754
3755 /* initialize outer IP header fields */
3756 if (ip.v4->version == 4) {
3757 unsigned char *csum_start = skb_checksum_start(skb);
3758 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3759
3760 /* IP header will have to cancel out any data that
3761 * is not a part of the outer IP header
3762 */
3763 ip.v4->check = csum_fold(csum_partial(trans_start,
3764 csum_start - trans_start,
3765 0));
3766 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3767
3768 ip.v4->tot_len = 0;
3769 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3770 IXGBE_TX_FLAGS_CSUM |
3771 IXGBE_TX_FLAGS_IPV4;
3772 } else {
3773 ip.v6->payload_len = 0;
3774 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3775 IXGBE_TX_FLAGS_CSUM;
3776 }
3777
3778 /* determine offset of inner transport header */
3779 l4_offset = l4.hdr - skb->data;
3780
3781 /* compute length of segmentation header */
3782 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3783
3784 /* remove payload length from inner checksum */
3785 paylen = skb->len - l4_offset;
3786 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3787
3788 /* update gso size and bytecount with header size */
3789 first->gso_segs = skb_shinfo(skb)->gso_segs;
3790 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3791
3792 /* mss_l4len_id: use 1 as index for TSO */
3793 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3794 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3795 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3796
3797 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3798 vlan_macip_lens = l4.hdr - ip.hdr;
3799 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3800 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3801
3802 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3803 type_tucmd, mss_l4len_idx);
3804
3805 return 1;
3806 }
3807
3808 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3809 {
3810 unsigned int offset = 0;
3811
3812 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3813
3814 return offset == skb_checksum_start_offset(skb);
3815 }
3816
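/**
 * ixgbevf_tx_csum - prepare a checksum offload context descriptor
 * @tx_ring: Tx descriptor ring
 * @first: first tx_buffer for this packet
 *
 * Offloads TCP, UDP and SCTP checksums based on skb->csum_offset; any
 * other protocol falls back to skb_checksum_help(). A context descriptor
 * is always written so the MACLEN/VLAN fields stay current.
 **/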
3817 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3818 struct ixgbevf_tx_buffer *first)
3819 {
3820 struct sk_buff *skb = first->skb;
3821 u32 vlan_macip_lens = 0;
3822 u32 type_tucmd = 0;
3823
3824 if (skb->ip_summed != CHECKSUM_PARTIAL)
3825 goto no_csum;
3826
3827 switch (skb->csum_offset) {
3828 case offsetof(struct tcphdr, check):
3829 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3830 /* fall through */
3831 case offsetof(struct udphdr, check):
3832 break;
3833 case offsetof(struct sctphdr, checksum):
3834 /* validate that this is actually an SCTP request */
3835 if (((first->protocol == htons(ETH_P_IP)) &&
3836 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3837 ((first->protocol == htons(ETH_P_IPV6)) &&
3838 ixgbevf_ipv6_csum_is_sctp(skb))) {
3839 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3840 break;
3841 }
3842 /* fall through */
3843 default:
3844 skb_checksum_help(skb);
3845 goto no_csum;
3846 }
3847
3848 if (first->protocol == htons(ETH_P_IP))
3849 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3850
3851 /* update TX checksum flag */
3852 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3853 vlan_macip_lens = skb_checksum_start_offset(skb) -
3854 skb_network_offset(skb);
3855 no_csum:
3856 /* vlan_macip_lens: MACLEN, VLAN tag */
3857 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3858 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3859
3860 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
3861 }
3862
3863 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3864 {
3865 /* set type for advanced descriptor with frame checksum insertion */
3866 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3867 IXGBE_ADVTXD_DCMD_IFCS |
3868 IXGBE_ADVTXD_DCMD_DEXT);
3869
3870 /* set HW VLAN bit if VLAN is present */
3871 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3872 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3873
3874 /* set segmentation enable bits for TSO/FSO */
3875 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3876 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3877
3878 return cmd_type;
3879 }
3880
3881 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3882 u32 tx_flags, unsigned int paylen)
3883 {
3884 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3885
3886 /* enable L4 checksum for TSO and TX checksum offload */
3887 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3888 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3889
3890 	/* enable IPv4 checksum for TSO */
3891 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3892 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3893
3894 /* use index 1 context for TSO/FSO/FCOE */
3895 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3896 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3897
3898 /* Check Context must be set if Tx switch is enabled, which it
3899 	 * always is in the case where virtual functions are running
3900 */
3901 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3902
3903 tx_desc->read.olinfo_status = olinfo_status;
3904 }
3905
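/**
 * ixgbevf_tx_map - map the skb and post data descriptors to the ring
 * @tx_ring: Tx descriptor ring
 * @first: first tx_buffer for this packet
 * @hdr_len: segmentation header length excluded from olinfo paylen
 *
 * DMA-maps the linear data and each page fragment, splitting buffers
 * larger than IXGBE_MAX_DATA_PER_TXD across descriptors, sets RS/EOP on
 * the last descriptor and bumps the tail register. On a mapping error all
 * mappings for the packet are undone and the skb is freed.
 **/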
3906 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3907 struct ixgbevf_tx_buffer *first,
3908 const u8 hdr_len)
3909 {
3910 struct sk_buff *skb = first->skb;
3911 struct ixgbevf_tx_buffer *tx_buffer;
3912 union ixgbe_adv_tx_desc *tx_desc;
3913 struct skb_frag_struct *frag;
3914 dma_addr_t dma;
3915 unsigned int data_len, size;
3916 u32 tx_flags = first->tx_flags;
3917 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3918 u16 i = tx_ring->next_to_use;
3919
3920 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3921
3922 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3923
3924 size = skb_headlen(skb);
3925 data_len = skb->data_len;
3926
3927 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3928
3929 tx_buffer = first;
3930
3931 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3932 if (dma_mapping_error(tx_ring->dev, dma))
3933 goto dma_error;
3934
3935 /* record length, and DMA address */
3936 dma_unmap_len_set(tx_buffer, len, size);
3937 dma_unmap_addr_set(tx_buffer, dma, dma);
3938
3939 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3940
3941 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3942 tx_desc->read.cmd_type_len =
3943 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3944
3945 i++;
3946 tx_desc++;
3947 if (i == tx_ring->count) {
3948 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3949 i = 0;
3950 }
3951 tx_desc->read.olinfo_status = 0;
3952
3953 dma += IXGBE_MAX_DATA_PER_TXD;
3954 size -= IXGBE_MAX_DATA_PER_TXD;
3955
3956 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3957 }
3958
3959 if (likely(!data_len))
3960 break;
3961
3962 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3963
3964 i++;
3965 tx_desc++;
3966 if (i == tx_ring->count) {
3967 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3968 i = 0;
3969 }
3970 tx_desc->read.olinfo_status = 0;
3971
3972 size = skb_frag_size(frag);
3973 data_len -= size;
3974
3975 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3976 DMA_TO_DEVICE);
3977
3978 tx_buffer = &tx_ring->tx_buffer_info[i];
3979 }
3980
3981 /* write last descriptor with RS and EOP bits */
3982 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3983 tx_desc->read.cmd_type_len = cmd_type;
3984
3985 /* set the timestamp */
3986 first->time_stamp = jiffies;
3987
3988 /* Force memory writes to complete before letting h/w know there
3989 * are new descriptors to fetch. (Only applicable for weak-ordered
3990 * memory model archs, such as IA-64).
3991 *
3992 * We also need this memory barrier (wmb) to make certain all of the
3993 * status bits have been updated before next_to_watch is written.
3994 */
3995 wmb();
3996
3997 /* set next_to_watch value indicating a packet is present */
3998 first->next_to_watch = tx_desc;
3999
4000 i++;
4001 if (i == tx_ring->count)
4002 i = 0;
4003
4004 tx_ring->next_to_use = i;
4005
4006 /* notify HW of packet */
4007 ixgbevf_write_tail(tx_ring, i);
4008
4009 return;
4010 dma_error:
4011 dev_err(tx_ring->dev, "TX DMA map failed\n");
4012 tx_buffer = &tx_ring->tx_buffer_info[i];
4013
4014 /* clear dma mappings for failed tx_buffer_info map */
4015 while (tx_buffer != first) {
4016 if (dma_unmap_len(tx_buffer, len))
4017 dma_unmap_page(tx_ring->dev,
4018 dma_unmap_addr(tx_buffer, dma),
4019 dma_unmap_len(tx_buffer, len),
4020 DMA_TO_DEVICE);
4021 dma_unmap_len_set(tx_buffer, len, 0);
4022
4023 if (i-- == 0)
4024 i += tx_ring->count;
4025 tx_buffer = &tx_ring->tx_buffer_info[i];
4026 }
4027
4028 if (dma_unmap_len(tx_buffer, len))
4029 dma_unmap_single(tx_ring->dev,
4030 dma_unmap_addr(tx_buffer, dma),
4031 dma_unmap_len(tx_buffer, len),
4032 DMA_TO_DEVICE);
4033 dma_unmap_len_set(tx_buffer, len, 0);
4034
4035 dev_kfree_skb_any(tx_buffer->skb);
4036 tx_buffer->skb = NULL;
4037
4038 tx_ring->next_to_use = i;
4039 }
4040
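/* Stop the queue when the ring is nearly full; recheck after the memory
 * barrier in case the cleanup path freed descriptors in the meantime, and
 * restart the queue if it did.
 */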
4041 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4042 {
4043 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4044 /* Herbert's original patch had:
4045 * smp_mb__after_netif_stop_queue();
4046 * but since that doesn't exist yet, just open code it.
4047 */
4048 smp_mb();
4049
4050 /* We need to check again in a case another CPU has just
4051 * made room available.
4052 */
4053 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4054 return -EBUSY;
4055
4056 /* A reprieve! - use start_queue because it doesn't call schedule */
4057 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4058 ++tx_ring->tx_stats.restart_queue;
4059
4060 return 0;
4061 }
4062
4063 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4064 {
4065 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4066 return 0;
4067 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4068 }
4069
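/**
 * ixgbevf_xmit_frame_ring - transmit one skb on a specific Tx ring
 * @skb: packet to send
 * @tx_ring: Tx descriptor ring to use
 *
 * Drops frames destined to link-local addresses, reserves enough
 * descriptors for the worst case, records per-packet state in the first
 * tx_buffer, then runs the TSO/checksum offload setup before handing the
 * skb to ixgbevf_tx_map().
 **/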
4070 static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4071 struct ixgbevf_ring *tx_ring)
4072 {
4073 struct ixgbevf_tx_buffer *first;
4074 int tso;
4075 u32 tx_flags = 0;
4076 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4077 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4078 unsigned short f;
4079 #endif
4080 u8 hdr_len = 0;
4081 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4082
4083 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4084 dev_kfree_skb_any(skb);
4085 return NETDEV_TX_OK;
4086 }
4087
4088 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
4089 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
4090 * + 2 desc gap to keep tail from touching head,
4091 * + 1 desc for context descriptor,
4092 * otherwise try next time
4093 */
4094 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4095 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4096 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4097 #else
4098 count += skb_shinfo(skb)->nr_frags;
4099 #endif
4100 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4101 tx_ring->tx_stats.tx_busy++;
4102 return NETDEV_TX_BUSY;
4103 }
4104
4105 /* record the location of the first descriptor for this packet */
4106 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4107 first->skb = skb;
4108 first->bytecount = skb->len;
4109 first->gso_segs = 1;
4110
4111 if (skb_vlan_tag_present(skb)) {
4112 tx_flags |= skb_vlan_tag_get(skb);
4113 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4114 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4115 }
4116
4117 /* record initial flags and protocol */
4118 first->tx_flags = tx_flags;
4119 first->protocol = vlan_get_protocol(skb);
4120
4121 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
4122 if (tso < 0)
4123 goto out_drop;
4124 else if (!tso)
4125 ixgbevf_tx_csum(tx_ring, first);
4126
4127 ixgbevf_tx_map(tx_ring, first, hdr_len);
4128
4129 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4130
4131 return NETDEV_TX_OK;
4132
4133 out_drop:
4134 dev_kfree_skb_any(first->skb);
4135 first->skb = NULL;
4136
4137 return NETDEV_TX_OK;
4138 }
4139
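/**
 * ixgbevf_xmit_frame - .ndo_start_xmit entry point
 * @skb: packet to send
 * @netdev: network interface device structure
 *
 * Pads undersized frames to the 17-byte olinfo paylen minimum and
 * dispatches the skb to the Tx ring selected by its queue mapping.
 **/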
4140 static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4141 {
4142 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4143 struct ixgbevf_ring *tx_ring;
4144
4145 if (skb->len <= 0) {
4146 dev_kfree_skb_any(skb);
4147 return NETDEV_TX_OK;
4148 }
4149
4150 /* The minimum packet size for olinfo paylen is 17 so pad the skb
4151 * in order to meet this minimum size requirement.
4152 */
4153 if (skb->len < 17) {
4154 if (skb_padto(skb, 17))
4155 return NETDEV_TX_OK;
4156 skb->len = 17;
4157 }
4158
4159 tx_ring = adapter->tx_ring[skb->queue_mapping];
4160 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4161 }
4162
4163 /**
4164 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4165 * @netdev: network interface device structure
4166 * @p: pointer to an address structure
4167 *
4168 * Returns 0 on success, negative on failure
4169 **/
4170 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4171 {
4172 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4173 struct ixgbe_hw *hw = &adapter->hw;
4174 struct sockaddr *addr = p;
4175 int err;
4176
4177 if (!is_valid_ether_addr(addr->sa_data))
4178 return -EADDRNOTAVAIL;
4179
4180 spin_lock_bh(&adapter->mbx_lock);
4181
4182 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4183
4184 spin_unlock_bh(&adapter->mbx_lock);
4185
4186 if (err)
4187 return -EPERM;
4188
4189 ether_addr_copy(hw->mac.addr, addr->sa_data);
4190 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4191 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4192
4193 return 0;
4194 }
4195
4196 /**
4197 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4198 * @netdev: network interface device structure
4199 * @new_mtu: new value for maximum frame size
4200 *
4201 * Returns 0 on success, negative on failure
4202 **/
4203 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4204 {
4205 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4206 struct ixgbe_hw *hw = &adapter->hw;
4207 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4208 int ret;
4209
4210 /* prevent MTU being changed to a size unsupported by XDP */
4211 if (adapter->xdp_prog) {
4212 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4213 return -EPERM;
4214 }
4215
4216 spin_lock_bh(&adapter->mbx_lock);
4217 /* notify the PF of our intent to use this size of frame */
4218 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4219 spin_unlock_bh(&adapter->mbx_lock);
4220 if (ret)
4221 return -EINVAL;
4222
4223 hw_dbg(hw, "changing MTU from %d to %d\n",
4224 netdev->mtu, new_mtu);
4225
4226 /* must set new MTU before calling down or up */
4227 netdev->mtu = new_mtu;
4228
4229 if (netif_running(netdev))
4230 ixgbevf_reinit_locked(adapter);
4231
4232 return 0;
4233 }
4234
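/* Detach the interface, free its Tx/Rx resources and disable the PCI
 * device; shared by the PM suspend hook and ixgbevf_shutdown().
 */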
4235 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4236 {
4237 struct net_device *netdev = pci_get_drvdata(pdev);
4238 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4239 #ifdef CONFIG_PM
4240 int retval = 0;
4241 #endif
4242
4243 rtnl_lock();
4244 netif_device_detach(netdev);
4245
4246 if (netif_running(netdev))
4247 ixgbevf_close_suspend(adapter);
4248
4249 ixgbevf_clear_interrupt_scheme(adapter);
4250 rtnl_unlock();
4251
4252 #ifdef CONFIG_PM
4253 retval = pci_save_state(pdev);
4254 if (retval)
4255 return retval;
4256
4257 #endif
4258 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4259 pci_disable_device(pdev);
4260
4261 return 0;
4262 }
4263
4264 #ifdef CONFIG_PM
4265 static int ixgbevf_resume(struct pci_dev *pdev)
4266 {
4267 struct net_device *netdev = pci_get_drvdata(pdev);
4268 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4269 u32 err;
4270
4271 pci_restore_state(pdev);
4272 /* pci_restore_state clears dev->state_saved so call
4273 * pci_save_state to restore it.
4274 */
4275 pci_save_state(pdev);
4276
4277 err = pci_enable_device_mem(pdev);
4278 if (err) {
4279 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
4280 return err;
4281 }
4282
4283 adapter->hw.hw_addr = adapter->io_addr;
4284 smp_mb__before_atomic();
4285 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4286 pci_set_master(pdev);
4287
4288 ixgbevf_reset(adapter);
4289
4290 rtnl_lock();
4291 err = ixgbevf_init_interrupt_scheme(adapter);
4292 if (!err && netif_running(netdev))
4293 err = ixgbevf_open(netdev);
4294 rtnl_unlock();
4295 if (err)
4296 return err;
4297
4298 netif_device_attach(netdev);
4299
4300 return err;
4301 }
4302
4303 #endif /* CONFIG_PM */
4304 static void ixgbevf_shutdown(struct pci_dev *pdev)
4305 {
4306 ixgbevf_suspend(pdev, PMSG_SUSPEND);
4307 }
4308
4309 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4310 const struct ixgbevf_ring *ring)
4311 {
4312 u64 bytes, packets;
4313 unsigned int start;
4314
4315 if (ring) {
4316 do {
4317 start = u64_stats_fetch_begin_irq(&ring->syncp);
4318 bytes = ring->stats.bytes;
4319 packets = ring->stats.packets;
4320 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4321 stats->tx_bytes += bytes;
4322 stats->tx_packets += packets;
4323 }
4324 }
4325
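/**
 * ixgbevf_get_stats - .ndo_get_stats64 entry point
 * @netdev: network interface device structure
 * @stats: storage for the aggregated 64-bit statistics
 *
 * Refreshes the adapter counters, then sums per-ring byte/packet counts
 * for the Rx, Tx and XDP rings using the u64_stats sequence counters.
 **/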
4326 static void ixgbevf_get_stats(struct net_device *netdev,
4327 struct rtnl_link_stats64 *stats)
4328 {
4329 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4330 unsigned int start;
4331 u64 bytes, packets;
4332 const struct ixgbevf_ring *ring;
4333 int i;
4334
4335 ixgbevf_update_stats(adapter);
4336
4337 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4338
4339 rcu_read_lock();
4340 for (i = 0; i < adapter->num_rx_queues; i++) {
4341 ring = adapter->rx_ring[i];
4342 do {
4343 start = u64_stats_fetch_begin_irq(&ring->syncp);
4344 bytes = ring->stats.bytes;
4345 packets = ring->stats.packets;
4346 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4347 stats->rx_bytes += bytes;
4348 stats->rx_packets += packets;
4349 }
4350
4351 for (i = 0; i < adapter->num_tx_queues; i++) {
4352 ring = adapter->tx_ring[i];
4353 ixgbevf_get_tx_ring_stats(stats, ring);
4354 }
4355
4356 for (i = 0; i < adapter->num_xdp_queues; i++) {
4357 ring = adapter->xdp_ring[i];
4358 ixgbevf_get_tx_ring_stats(stats, ring);
4359 }
4360 rcu_read_unlock();
4361 }
4362
4363 #define IXGBEVF_MAX_MAC_HDR_LEN 127
4364 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4365
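/* Drop offload features that the hardware context descriptor cannot
 * describe for this particular skb (oversized MAC/network headers, or
 * tunnels that require inner IP ID mangling).
 */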
4366 static netdev_features_t
4367 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4368 netdev_features_t features)
4369 {
4370 unsigned int network_hdr_len, mac_hdr_len;
4371
4372 /* Make certain the headers can be described by a context descriptor */
4373 mac_hdr_len = skb_network_header(skb) - skb->data;
4374 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4375 return features & ~(NETIF_F_HW_CSUM |
4376 NETIF_F_SCTP_CRC |
4377 NETIF_F_HW_VLAN_CTAG_TX |
4378 NETIF_F_TSO |
4379 NETIF_F_TSO6);
4380
4381 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4382 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4383 return features & ~(NETIF_F_HW_CSUM |
4384 NETIF_F_SCTP_CRC |
4385 NETIF_F_TSO |
4386 NETIF_F_TSO6);
4387
4388 /* We can only support IPV4 TSO in tunnels if we can mangle the
4389 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4390 */
4391 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4392 features &= ~NETIF_F_TSO;
4393
4394 return features;
4395 }
4396
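/**
 * ixgbevf_xdp_setup - attach or detach an XDP program
 * @dev: network interface device structure
 * @prog: BPF program to install, or NULL to remove the current one
 *
 * Rejects the program if any Rx ring buffer is smaller than the current
 * frame size. Switching between XDP and non-XDP modes requires a full
 * queue/interrupt reinit; swapping one program for another only updates
 * the per-ring program pointers.
 **/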
4397 static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4398 {
4399 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4400 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4401 struct bpf_prog *old_prog;
4402
4403 /* verify ixgbevf ring attributes are sufficient for XDP */
4404 for (i = 0; i < adapter->num_rx_queues; i++) {
4405 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4406
4407 if (frame_size > ixgbevf_rx_bufsz(ring))
4408 return -EINVAL;
4409 }
4410
4411 old_prog = xchg(&adapter->xdp_prog, prog);
4412
4413 /* If transitioning XDP modes reconfigure rings */
4414 if (!!prog != !!old_prog) {
4415 /* Hardware has to reinitialize queues and interrupts to
4416 * match packet buffer alignment. Unfortunately, the
4417 * hardware is not flexible enough to do this dynamically.
4418 */
4419 if (netif_running(dev))
4420 ixgbevf_close(dev);
4421
4422 ixgbevf_clear_interrupt_scheme(adapter);
4423 ixgbevf_init_interrupt_scheme(adapter);
4424
4425 if (netif_running(dev))
4426 ixgbevf_open(dev);
4427 } else {
4428 for (i = 0; i < adapter->num_rx_queues; i++)
4429 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4430 }
4431
4432 if (old_prog)
4433 bpf_prog_put(old_prog);
4434
4435 return 0;
4436 }
4437
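/* .ndo_bpf entry point: dispatch XDP program setup and query commands */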
4438 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4439 {
4440 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4441
4442 switch (xdp->command) {
4443 case XDP_SETUP_PROG:
4444 return ixgbevf_xdp_setup(dev, xdp->prog);
4445 case XDP_QUERY_PROG:
4446 xdp->prog_id = adapter->xdp_prog ?
4447 adapter->xdp_prog->aux->id : 0;
4448 return 0;
4449 default:
4450 return -EINVAL;
4451 }
4452 }
4453
4454 static const struct net_device_ops ixgbevf_netdev_ops = {
4455 .ndo_open = ixgbevf_open,
4456 .ndo_stop = ixgbevf_close,
4457 .ndo_start_xmit = ixgbevf_xmit_frame,
4458 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4459 .ndo_get_stats64 = ixgbevf_get_stats,
4460 .ndo_validate_addr = eth_validate_addr,
4461 .ndo_set_mac_address = ixgbevf_set_mac,
4462 .ndo_change_mtu = ixgbevf_change_mtu,
4463 .ndo_tx_timeout = ixgbevf_tx_timeout,
4464 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4465 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4466 .ndo_features_check = ixgbevf_features_check,
4467 .ndo_bpf = ixgbevf_xdp,
4468 };
4469
4470 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4471 {
4472 dev->netdev_ops = &ixgbevf_netdev_ops;
4473 ixgbevf_set_ethtool_ops(dev);
4474 dev->watchdog_timeo = 5 * HZ;
4475 }
4476
4477 /**
4478 * ixgbevf_probe - Device Initialization Routine
4479 * @pdev: PCI device information struct
4480 * @ent: entry in ixgbevf_pci_tbl
4481 *
4482 * Returns 0 on success, negative on failure
4483 *
4484 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
4485 * The OS initialization, configuring of the adapter private structure,
4486 * and a hardware reset occur.
4487 **/
4488 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4489 {
4490 struct net_device *netdev;
4491 struct ixgbevf_adapter *adapter = NULL;
4492 struct ixgbe_hw *hw = NULL;
4493 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4494 int err, pci_using_dac;
4495 bool disable_dev = false;
4496
4497 err = pci_enable_device(pdev);
4498 if (err)
4499 return err;
4500
4501 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4502 pci_using_dac = 1;
4503 } else {
4504 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4505 if (err) {
4506 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4507 goto err_dma;
4508 }
4509 pci_using_dac = 0;
4510 }
4511
4512 err = pci_request_regions(pdev, ixgbevf_driver_name);
4513 if (err) {
4514 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4515 goto err_pci_reg;
4516 }
4517
4518 pci_set_master(pdev);
4519
4520 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4521 MAX_TX_QUEUES);
4522 if (!netdev) {
4523 err = -ENOMEM;
4524 goto err_alloc_etherdev;
4525 }
4526
4527 SET_NETDEV_DEV(netdev, &pdev->dev);
4528
4529 adapter = netdev_priv(netdev);
4530
4531 adapter->netdev = netdev;
4532 adapter->pdev = pdev;
4533 hw = &adapter->hw;
4534 hw->back = adapter;
4535 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4536
4537 /* call save state here in standalone driver because it relies on
4538 * adapter struct to exist, and needs to call netdev_priv
4539 */
4540 pci_save_state(pdev);
4541
4542 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4543 pci_resource_len(pdev, 0));
4544 adapter->io_addr = hw->hw_addr;
4545 if (!hw->hw_addr) {
4546 err = -EIO;
4547 goto err_ioremap;
4548 }
4549
4550 ixgbevf_assign_netdev_ops(netdev);
4551
4552 /* Setup HW API */
4553 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4554 hw->mac.type = ii->mac;
4555
4556 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4557 sizeof(struct ixgbe_mbx_operations));
4558
4559 /* setup the private structure */
4560 err = ixgbevf_sw_init(adapter);
4561 if (err)
4562 goto err_sw_init;
4563
4564 /* The HW MAC address was set and/or determined in sw_init */
4565 if (!is_valid_ether_addr(netdev->dev_addr)) {
4566 pr_err("invalid MAC address\n");
4567 err = -EIO;
4568 goto err_sw_init;
4569 }
4570
4571 netdev->hw_features = NETIF_F_SG |
4572 NETIF_F_TSO |
4573 NETIF_F_TSO6 |
4574 NETIF_F_RXCSUM |
4575 NETIF_F_HW_CSUM |
4576 NETIF_F_SCTP_CRC;
4577
4578 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4579 NETIF_F_GSO_GRE_CSUM | \
4580 NETIF_F_GSO_IPXIP4 | \
4581 NETIF_F_GSO_IPXIP6 | \
4582 NETIF_F_GSO_UDP_TUNNEL | \
4583 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4584
4585 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4586 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4587 IXGBEVF_GSO_PARTIAL_FEATURES;
4588
4589 netdev->features = netdev->hw_features;
4590
4591 if (pci_using_dac)
4592 netdev->features |= NETIF_F_HIGHDMA;
4593
4594 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4595 netdev->mpls_features |= NETIF_F_SG |
4596 NETIF_F_TSO |
4597 NETIF_F_TSO6 |
4598 NETIF_F_HW_CSUM;
4599 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4600 netdev->hw_enc_features |= netdev->vlan_features;
4601
4602 /* set this bit last since it cannot be part of vlan_features */
4603 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4604 NETIF_F_HW_VLAN_CTAG_RX |
4605 NETIF_F_HW_VLAN_CTAG_TX;
4606
4607 netdev->priv_flags |= IFF_UNICAST_FLT;
4608
4609 /* MTU range: 68 - 1504 or 9710 */
4610 netdev->min_mtu = ETH_MIN_MTU;
4611 switch (adapter->hw.api_version) {
4612 case ixgbe_mbox_api_11:
4613 case ixgbe_mbox_api_12:
4614 case ixgbe_mbox_api_13:
4615 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4616 (ETH_HLEN + ETH_FCS_LEN);
4617 break;
4618 default:
4619 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4620 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4621 (ETH_HLEN + ETH_FCS_LEN);
4622 else
4623 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4624 break;
4625 }
4626
4627 if (IXGBE_REMOVED(hw->hw_addr)) {
4628 err = -EIO;
4629 goto err_sw_init;
4630 }
4631
4632 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4633
4634 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4635 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4636 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4637
4638 err = ixgbevf_init_interrupt_scheme(adapter);
4639 if (err)
4640 goto err_sw_init;
4641
4642 strcpy(netdev->name, "eth%d");
4643
4644 err = register_netdev(netdev);
4645 if (err)
4646 goto err_register;
4647
4648 pci_set_drvdata(pdev, netdev);
4649 netif_carrier_off(netdev);
4650
4651 ixgbevf_init_last_counter_stats(adapter);
4652
4653 /* print the VF info */
4654 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4655 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4656
4657 switch (hw->mac.type) {
4658 case ixgbe_mac_X550_vf:
4659 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4660 break;
4661 case ixgbe_mac_X540_vf:
4662 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4663 break;
4664 case ixgbe_mac_82599_vf:
4665 default:
4666 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4667 break;
4668 }
4669
4670 return 0;
4671
4672 err_register:
4673 ixgbevf_clear_interrupt_scheme(adapter);
4674 err_sw_init:
4675 ixgbevf_reset_interrupt_capability(adapter);
4676 iounmap(adapter->io_addr);
4677 kfree(adapter->rss_key);
4678 err_ioremap:
4679 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4680 free_netdev(netdev);
4681 err_alloc_etherdev:
4682 pci_release_regions(pdev);
4683 err_pci_reg:
4684 err_dma:
4685 if (!adapter || disable_dev)
4686 pci_disable_device(pdev);
4687 return err;
4688 }
4689
4690 /**
4691 * ixgbevf_remove - Device Removal Routine
4692 * @pdev: PCI device information struct
4693 *
4694 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4695  * that it should release a PCI device. This could be caused by a
4696 * Hot-Plug event, or because the driver is going to be removed from
4697 * memory.
4698 **/
4699 static void ixgbevf_remove(struct pci_dev *pdev)
4700 {
4701 struct net_device *netdev = pci_get_drvdata(pdev);
4702 struct ixgbevf_adapter *adapter;
4703 bool disable_dev;
4704
4705 if (!netdev)
4706 return;
4707
4708 adapter = netdev_priv(netdev);
4709
4710 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4711 cancel_work_sync(&adapter->service_task);
4712
4713 if (netdev->reg_state == NETREG_REGISTERED)
4714 unregister_netdev(netdev);
4715
4716 ixgbevf_clear_interrupt_scheme(adapter);
4717 ixgbevf_reset_interrupt_capability(adapter);
4718
4719 iounmap(adapter->io_addr);
4720 pci_release_regions(pdev);
4721
4722 hw_dbg(&adapter->hw, "Remove complete\n");
4723
4724 kfree(adapter->rss_key);
4725 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4726 free_netdev(netdev);
4727
4728 if (disable_dev)
4729 pci_disable_device(pdev);
4730 }
4731
4732 /**
4733 * ixgbevf_io_error_detected - called when PCI error is detected
4734 * @pdev: Pointer to PCI device
4735 * @state: The current pci connection state
4736 *
4737 * This function is called after a PCI bus error affecting
4738 * this device has been detected.
4739 **/
4740 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4741 pci_channel_state_t state)
4742 {
4743 struct net_device *netdev = pci_get_drvdata(pdev);
4744 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4745
4746 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4747 return PCI_ERS_RESULT_DISCONNECT;
4748
4749 rtnl_lock();
4750 netif_device_detach(netdev);
4751
4752 if (netif_running(netdev))
4753 ixgbevf_close_suspend(adapter);
4754
4755 if (state == pci_channel_io_perm_failure) {
4756 rtnl_unlock();
4757 return PCI_ERS_RESULT_DISCONNECT;
4758 }
4759
4760 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4761 pci_disable_device(pdev);
4762 rtnl_unlock();
4763
4764 	/* Request a slot reset. */
4765 return PCI_ERS_RESULT_NEED_RESET;
4766 }
4767
4768 /**
4769 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4770 * @pdev: Pointer to PCI device
4771 *
4772 * Restart the card from scratch, as if from a cold-boot. Implementation
4773 * resembles the first-half of the ixgbevf_resume routine.
4774 **/
4775 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4776 {
4777 struct net_device *netdev = pci_get_drvdata(pdev);
4778 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4779
4780 if (pci_enable_device_mem(pdev)) {
4781 dev_err(&pdev->dev,
4782 "Cannot re-enable PCI device after reset.\n");
4783 return PCI_ERS_RESULT_DISCONNECT;
4784 }
4785
4786 adapter->hw.hw_addr = adapter->io_addr;
4787 smp_mb__before_atomic();
4788 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4789 pci_set_master(pdev);
4790
4791 ixgbevf_reset(adapter);
4792
4793 return PCI_ERS_RESULT_RECOVERED;
4794 }
4795
4796 /**
4797 * ixgbevf_io_resume - called when traffic can start flowing again.
4798 * @pdev: Pointer to PCI device
4799 *
4800 * This callback is called when the error recovery driver tells us that
4801  * it's OK to resume normal operation. Implementation resembles the
4802 * second-half of the ixgbevf_resume routine.
4803 **/
4804 static void ixgbevf_io_resume(struct pci_dev *pdev)
4805 {
4806 struct net_device *netdev = pci_get_drvdata(pdev);
4807
4808 rtnl_lock();
4809 if (netif_running(netdev))
4810 ixgbevf_open(netdev);
4811
4812 netif_device_attach(netdev);
4813 rtnl_unlock();
4814 }
4815
4816 /* PCI Error Recovery (ERS) */
4817 static const struct pci_error_handlers ixgbevf_err_handler = {
4818 .error_detected = ixgbevf_io_error_detected,
4819 .slot_reset = ixgbevf_io_slot_reset,
4820 .resume = ixgbevf_io_resume,
4821 };
4822
4823 static struct pci_driver ixgbevf_driver = {
4824 .name = ixgbevf_driver_name,
4825 .id_table = ixgbevf_pci_tbl,
4826 .probe = ixgbevf_probe,
4827 .remove = ixgbevf_remove,
4828 #ifdef CONFIG_PM
4829 /* Power Management Hooks */
4830 .suspend = ixgbevf_suspend,
4831 .resume = ixgbevf_resume,
4832 #endif
4833 .shutdown = ixgbevf_shutdown,
4834 .err_handler = &ixgbevf_err_handler
4835 };
4836
4837 /**
4838 * ixgbevf_init_module - Driver Registration Routine
4839 *
4840 * ixgbevf_init_module is the first routine called when the driver is
4841 * loaded. All it does is register with the PCI subsystem.
4842 **/
4843 static int __init ixgbevf_init_module(void)
4844 {
4845 pr_info("%s - version %s\n", ixgbevf_driver_string,
4846 ixgbevf_driver_version);
4847
4848 pr_info("%s\n", ixgbevf_copyright);
4849 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4850 if (!ixgbevf_wq) {
4851 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4852 return -ENOMEM;
4853 }
4854
4855 return pci_register_driver(&ixgbevf_driver);
4856 }
4857
4858 module_init(ixgbevf_init_module);
4859
4860 /**
4861 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4862 *
4863 * ixgbevf_exit_module is called just before the driver is removed
4864 * from memory.
4865 **/
4866 static void __exit ixgbevf_exit_module(void)
4867 {
4868 pci_unregister_driver(&ixgbevf_driver);
4869 if (ixgbevf_wq) {
4870 destroy_workqueue(ixgbevf_wq);
4871 ixgbevf_wq = NULL;
4872 }
4873 }
4874
4875 #ifdef DEBUG
4876 /**
4877 * ixgbevf_get_hw_dev_name - return device name string
4878 * used by hardware layer to print debugging information
4879 * @hw: pointer to private hardware struct
4880 **/
4881 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4882 {
4883 struct ixgbevf_adapter *adapter = hw->back;
4884
4885 return adapter->netdev->name;
4886 }
4887
4888 #endif
4889 module_exit(ixgbevf_exit_module);
4890
4891 /* ixgbevf_main.c */
4892