1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3 *
4 * Copyright (C) 2015-2024 Google LLC
5 */
6
7 #include <linux/bpf.h>
8 #include <linux/cpumask.h>
9 #include <linux/etherdevice.h>
10 #include <linux/filter.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/sched.h>
16 #include <linux/timer.h>
17 #include <linux/workqueue.h>
18 #include <linux/utsname.h>
19 #include <linux/version.h>
20 #include <net/netdev_queues.h>
21 #include <net/sch_generic.h>
22 #include <net/xdp_sock_drv.h>
23 #include "gve.h"
24 #include "gve_dqo.h"
25 #include "gve_adminq.h"
26 #include "gve_register.h"
27 #include "gve_utils.h"
28
29 #define GVE_DEFAULT_RX_COPYBREAK (256)
30
31 #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK)
32 #define GVE_VERSION "1.0.0"
33 #define GVE_VERSION_PREFIX "GVE-"
34
35 // Minimum amount of time between queue kicks in msec (10 seconds)
36 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
37
38 char gve_driver_name[] = "gve";
39 const char gve_version_str[] = GVE_VERSION;
40 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
41
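/* Report the driver's OS type, kernel version and capability flags to the
 * device over the admin queue so it can account for driver behavior. The
 * info block is DMA-coherent memory that is freed again before returning;
 * a device that does not implement the command (-EOPNOTSUPP) is not treated
 * as an error.
 */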
42 static int gve_verify_driver_compatibility(struct gve_priv *priv)
43 {
44 int err;
45 struct gve_driver_info *driver_info;
46 dma_addr_t driver_info_bus;
47
48 driver_info = dma_alloc_coherent(&priv->pdev->dev,
49 sizeof(struct gve_driver_info),
50 &driver_info_bus, GFP_KERNEL);
51 if (!driver_info)
52 return -ENOMEM;
53
54 *driver_info = (struct gve_driver_info) {
55 .os_type = 1, /* Linux */
56 .os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
57 .os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
58 .os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
59 .driver_capability_flags = {
60 cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
61 cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
62 cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
63 cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
64 },
65 };
66 strscpy(driver_info->os_version_str1, utsname()->release,
67 sizeof(driver_info->os_version_str1));
68 strscpy(driver_info->os_version_str2, utsname()->version,
69 sizeof(driver_info->os_version_str2));
70
71 err = gve_adminq_verify_driver_compatibility(priv,
72 sizeof(struct gve_driver_info),
73 driver_info_bus);
74
75 /* It's ok if the device doesn't support this */
76 if (err == -EOPNOTSUPP)
77 err = 0;
78
79 dma_free_coherent(&priv->pdev->dev,
80 sizeof(struct gve_driver_info),
81 driver_info, driver_info_bus);
82 return err;
83 }
84
85 static netdev_features_t gve_features_check(struct sk_buff *skb,
86 struct net_device *dev,
87 netdev_features_t features)
88 {
89 struct gve_priv *priv = netdev_priv(dev);
90
91 if (!gve_is_gqi(priv))
92 return gve_features_check_dqo(skb, dev, features);
93
94 return features;
95 }
96
97 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
98 {
99 struct gve_priv *priv = netdev_priv(dev);
100
101 if (gve_is_gqi(priv))
102 return gve_tx(skb, dev);
103 else
104 return gve_tx_dqo(skb, dev);
105 }
106
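/* Collect netdev stats (rtnl_link_stats64): per-ring packet/byte counters are
 * read under the u64_stats seqcount (u64_stats_fetch_begin/_retry) so a value
 * torn by a concurrent writer is re-read, then summed across all
 * priv->rx_cfg.num_queues RX rings and gve_num_tx_queues(priv) TX rings.
 */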
107 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
108 {
109 struct gve_priv *priv = netdev_priv(dev);
110 unsigned int start;
111 u64 packets, bytes;
112 int num_tx_queues;
113 int ring;
114
115 num_tx_queues = gve_num_tx_queues(priv);
116 if (priv->rx) {
117 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
118 do {
119 start =
120 u64_stats_fetch_begin(&priv->rx[ring].statss);
121 packets = priv->rx[ring].rpackets;
122 bytes = priv->rx[ring].rbytes;
123 } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
124 start));
125 s->rx_packets += packets;
126 s->rx_bytes += bytes;
127 }
128 }
129 if (priv->tx) {
130 for (ring = 0; ring < num_tx_queues; ring++) {
131 do {
132 start =
133 u64_stats_fetch_begin(&priv->tx[ring].statss);
134 packets = priv->tx[ring].pkt_done;
135 bytes = priv->tx[ring].bytes_done;
136 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
137 start));
138 s->tx_packets += packets;
139 s->tx_bytes += bytes;
140 }
141 }
142 }
143
144 static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
145 {
146 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
147 int err = 0;
148
149 if (!priv->max_flow_rules)
150 return 0;
151
152 flow_rules_cache->rules_cache =
153 kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache),
154 GFP_KERNEL);
155 if (!flow_rules_cache->rules_cache) {
156 dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
157 return -ENOMEM;
158 }
159
160 flow_rules_cache->rule_ids_cache =
161 kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
162 GFP_KERNEL);
163 if (!flow_rules_cache->rule_ids_cache) {
164 dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
165 err = -ENOMEM;
166 goto free_rules_cache;
167 }
168
169 return 0;
170
171 free_rules_cache:
172 kvfree(flow_rules_cache->rules_cache);
173 flow_rules_cache->rules_cache = NULL;
174 return err;
175 }
176
177 static void gve_free_flow_rule_caches(struct gve_priv *priv)
178 {
179 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
180
181 kvfree(flow_rules_cache->rule_ids_cache);
182 flow_rules_cache->rule_ids_cache = NULL;
183 kvfree(flow_rules_cache->rules_cache);
184 flow_rules_cache->rules_cache = NULL;
185 }
186
187 static int gve_alloc_counter_array(struct gve_priv *priv)
188 {
189 priv->counter_array =
190 dma_alloc_coherent(&priv->pdev->dev,
191 priv->num_event_counters *
192 sizeof(*priv->counter_array),
193 &priv->counter_array_bus, GFP_KERNEL);
194 if (!priv->counter_array)
195 return -ENOMEM;
196
197 return 0;
198 }
199
200 static void gve_free_counter_array(struct gve_priv *priv)
201 {
202 if (!priv->counter_array)
203 return;
204
205 dma_free_coherent(&priv->pdev->dev,
206 priv->num_event_counters *
207 sizeof(*priv->counter_array),
208 priv->counter_array, priv->counter_array_bus);
209 priv->counter_array = NULL;
210 }
211
212 /* NIC requests to report stats */
213 static void gve_stats_report_task(struct work_struct *work)
214 {
215 struct gve_priv *priv = container_of(work, struct gve_priv,
216 stats_report_task);
217 if (gve_get_do_report_stats(priv)) {
218 gve_handle_report_stats(priv);
219 gve_clear_do_report_stats(priv);
220 }
221 }
222
223 static void gve_stats_report_schedule(struct gve_priv *priv)
224 {
225 if (!gve_get_probe_in_progress(priv) &&
226 !gve_get_reset_in_progress(priv)) {
227 gve_set_do_report_stats(priv);
228 queue_work(priv->gve_wq, &priv->stats_report_task);
229 }
230 }
231
232 static void gve_stats_report_timer(struct timer_list *t)
233 {
234 struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
235
236 mod_timer(&priv->stats_report_timer,
237 round_jiffies(jiffies +
238 msecs_to_jiffies(priv->stats_report_timer_period)));
239 gve_stats_report_schedule(priv);
240 }
241
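/* Size the DMA-coherent stats report so it can hold both driver-side and
 * NIC-side entries for every queue, i.e.
 *   stats = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * num_tx +
 *           (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * num_rx
 * and arm the periodic report-stats timer.
 */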
242 static int gve_alloc_stats_report(struct gve_priv *priv)
243 {
244 int tx_stats_num, rx_stats_num;
245
246 tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
247 gve_num_tx_queues(priv);
248 rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
249 priv->rx_cfg.num_queues;
250 priv->stats_report_len = struct_size(priv->stats_report, stats,
251 size_add(tx_stats_num, rx_stats_num));
252 priv->stats_report =
253 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
254 &priv->stats_report_bus, GFP_KERNEL);
255 if (!priv->stats_report)
256 return -ENOMEM;
257 /* Set up timer for the report-stats task */
258 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
259 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
260 return 0;
261 }
262
263 static void gve_free_stats_report(struct gve_priv *priv)
264 {
265 if (!priv->stats_report)
266 return;
267
268 del_timer_sync(&priv->stats_report_timer);
269 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
270 priv->stats_report, priv->stats_report_bus);
271 priv->stats_report = NULL;
272 }
273
274 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
275 {
276 struct gve_priv *priv = arg;
277
278 queue_work(priv->gve_wq, &priv->service_task);
279 return IRQ_HANDLED;
280 }
281
282 static irqreturn_t gve_intr(int irq, void *arg)
283 {
284 struct gve_notify_block *block = arg;
285 struct gve_priv *priv = block->priv;
286
287 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
288 napi_schedule_irqoff(&block->napi);
289 return IRQ_HANDLED;
290 }
291
292 static irqreturn_t gve_intr_dqo(int irq, void *arg)
293 {
294 struct gve_notify_block *block = arg;
295
296 /* Interrupts are automatically masked */
297 napi_schedule_irqoff(&block->napi);
298 return IRQ_HANDLED;
299 }
300
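/* Returns true if the current CPU is in the irq's effective affinity mask
 * (or if no mask is available), so the DQO poll loop can decide whether it is
 * safe to keep NAPI running here or should re-arm the irq instead.
 */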
301 static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
302 {
303 int cpu_curr = smp_processor_id();
304 const struct cpumask *aff_mask;
305
306 aff_mask = irq_get_effective_affinity_mask(irq);
307 if (unlikely(!aff_mask))
308 return 1;
309
310 return cpumask_test_cpu(cpu_curr, aff_mask);
311 }
312
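/* GQI NAPI poll: clean TX (or XDP TX) completions, poll RX up to the budget,
 * and optionally poll XSK TX. If the budget was exhausted, return it to stay
 * scheduled; otherwise complete NAPI, ack the irq doorbell and re-schedule if
 * more work raced in after the ack.
 */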
313 int gve_napi_poll(struct napi_struct *napi, int budget)
314 {
315 struct gve_notify_block *block;
316 __be32 __iomem *irq_doorbell;
317 bool reschedule = false;
318 struct gve_priv *priv;
319 int work_done = 0;
320
321 block = container_of(napi, struct gve_notify_block, napi);
322 priv = block->priv;
323
324 if (block->tx) {
325 if (block->tx->q_num < priv->tx_cfg.num_queues)
326 reschedule |= gve_tx_poll(block, budget);
327 else if (budget)
328 reschedule |= gve_xdp_poll(block, budget);
329 }
330
331 if (!budget)
332 return 0;
333
334 if (block->rx) {
335 work_done = gve_rx_poll(block, budget);
336
337 /* Poll XSK TX as part of RX NAPI. Set up the re-poll based on the
338 * max of TX and RX work done.
339 */
340 if (priv->xdp_prog)
341 work_done = max_t(int, work_done,
342 gve_xsk_tx_poll(block, budget));
343
344 reschedule |= work_done == budget;
345 }
346
347 if (reschedule)
348 return budget;
349
350 /* Complete processing - don't unmask irq if busy polling is enabled */
351 if (likely(napi_complete_done(napi, work_done))) {
352 irq_doorbell = gve_irq_doorbell(priv, block);
353 iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
354
355 /* Ensure the IRQ ACK is visible before we check for pending work.
356 * If the queue has issued updates, they will then be visible.
357 */
358 mb();
359
360 if (block->tx)
361 reschedule |= gve_tx_clean_pending(priv, block->tx);
362 if (block->rx)
363 reschedule |= gve_rx_work_pending(block->rx);
364
365 if (reschedule && napi_schedule(napi))
366 iowrite32be(GVE_IRQ_MASK, irq_doorbell);
367 }
368 return work_done;
369 }
370
371 int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
372 {
373 struct gve_notify_block *block =
374 container_of(napi, struct gve_notify_block, napi);
375 struct gve_priv *priv = block->priv;
376 bool reschedule = false;
377 int work_done = 0;
378
379 if (block->tx)
380 reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
381
382 if (!budget)
383 return 0;
384
385 if (block->rx) {
386 work_done = gve_rx_poll_dqo(block, budget);
387 reschedule |= work_done == budget;
388 }
389
390 if (reschedule) {
391 /* Reschedule by returning budget only if already on the correct
392 * cpu.
393 */
394 if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
395 return budget;
396
397 /* If not on the cpu to which this queue's irq has affinity,
398 * we avoid rescheduling napi and arm the irq instead so
399 * that napi eventually gets rescheduled back onto the right
400 * cpu.
401 */
402 if (work_done == budget)
403 work_done--;
404 }
405
406 if (likely(napi_complete_done(napi, work_done))) {
407 /* Enable interrupts again.
408 *
409 * We don't need to repoll afterwards because HW supports the
410 * PCI MSI-X PBA feature.
411 *
412 * Another interrupt would be triggered if a new event came in
413 * since the last one.
414 */
415 gve_write_irq_doorbell_dqo(priv, block,
416 GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
417 }
418
419 return work_done;
420 }
421
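/* Allocate one MSI-X vector per notification block plus one management
 * vector (the last one). If fewer vectors are granted, the notify blocks are
 * trimmed to an even count and split between TX and RX; e.g. with 6 of 9
 * requested vectors (a hypothetical example, not from the source),
 * (6 - 1) & ~0x1 = 4 blocks remain, allowing at most 2 TX and 2 RX queues.
 */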
422 static int gve_alloc_notify_blocks(struct gve_priv *priv)
423 {
424 int num_vecs_requested = priv->num_ntfy_blks + 1;
425 unsigned int active_cpus;
426 int vecs_enabled;
427 int i, j;
428 int err;
429
430 priv->msix_vectors = kvcalloc(num_vecs_requested,
431 sizeof(*priv->msix_vectors), GFP_KERNEL);
432 if (!priv->msix_vectors)
433 return -ENOMEM;
434 for (i = 0; i < num_vecs_requested; i++)
435 priv->msix_vectors[i].entry = i;
436 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
437 GVE_MIN_MSIX, num_vecs_requested);
438 if (vecs_enabled < 0) {
439 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
440 GVE_MIN_MSIX, vecs_enabled);
441 err = vecs_enabled;
442 goto abort_with_msix_vectors;
443 }
444 if (vecs_enabled != num_vecs_requested) {
445 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
446 int vecs_per_type = new_num_ntfy_blks / 2;
447 int vecs_left = new_num_ntfy_blks % 2;
448
449 priv->num_ntfy_blks = new_num_ntfy_blks;
450 priv->mgmt_msix_idx = priv->num_ntfy_blks;
451 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
452 vecs_per_type);
453 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
454 vecs_per_type + vecs_left);
455 dev_err(&priv->pdev->dev,
456 "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
457 vecs_enabled, priv->tx_cfg.max_queues,
458 priv->rx_cfg.max_queues);
459 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
460 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
461 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
462 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
463 }
464 /* Half the notification blocks go to TX and half to RX */
465 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
466
467 /* Setup Management Vector - the last vector */
468 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
469 pci_name(priv->pdev));
470 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
471 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
472 if (err) {
473 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
474 goto abort_with_msix_enabled;
475 }
476 priv->irq_db_indices =
477 dma_alloc_coherent(&priv->pdev->dev,
478 priv->num_ntfy_blks *
479 sizeof(*priv->irq_db_indices),
480 &priv->irq_db_indices_bus, GFP_KERNEL);
481 if (!priv->irq_db_indices) {
482 err = -ENOMEM;
483 goto abort_with_mgmt_vector;
484 }
485
486 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
487 sizeof(*priv->ntfy_blocks), GFP_KERNEL);
488 if (!priv->ntfy_blocks) {
489 err = -ENOMEM;
490 goto abort_with_irq_db_indices;
491 }
492
493 /* Setup the other blocks - the first n-1 vectors */
494 for (i = 0; i < priv->num_ntfy_blks; i++) {
495 struct gve_notify_block *block = &priv->ntfy_blocks[i];
496 int msix_idx = i;
497
498 snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
499 i, pci_name(priv->pdev));
500 block->priv = priv;
501 err = request_irq(priv->msix_vectors[msix_idx].vector,
502 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
503 0, block->name, block);
504 if (err) {
505 dev_err(&priv->pdev->dev,
506 "Failed to receive msix vector %d\n", i);
507 goto abort_with_some_ntfy_blocks;
508 }
509 block->irq = priv->msix_vectors[msix_idx].vector;
510 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
511 get_cpu_mask(i % active_cpus));
512 block->irq_db_index = &priv->irq_db_indices[i].index;
513 }
514 return 0;
515 abort_with_some_ntfy_blocks:
516 for (j = 0; j < i; j++) {
517 struct gve_notify_block *block = &priv->ntfy_blocks[j];
518 int msix_idx = j;
519
520 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
521 NULL);
522 free_irq(priv->msix_vectors[msix_idx].vector, block);
523 block->irq = 0;
524 }
525 kvfree(priv->ntfy_blocks);
526 priv->ntfy_blocks = NULL;
527 abort_with_irq_db_indices:
528 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
529 sizeof(*priv->irq_db_indices),
530 priv->irq_db_indices, priv->irq_db_indices_bus);
531 priv->irq_db_indices = NULL;
532 abort_with_mgmt_vector:
533 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
534 abort_with_msix_enabled:
535 pci_disable_msix(priv->pdev);
536 abort_with_msix_vectors:
537 kvfree(priv->msix_vectors);
538 priv->msix_vectors = NULL;
539 return err;
540 }
541
542 static void gve_free_notify_blocks(struct gve_priv *priv)
543 {
544 int i;
545
546 if (!priv->msix_vectors)
547 return;
548
549 /* Free the irqs */
550 for (i = 0; i < priv->num_ntfy_blks; i++) {
551 struct gve_notify_block *block = &priv->ntfy_blocks[i];
552 int msix_idx = i;
553
554 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
555 NULL);
556 free_irq(priv->msix_vectors[msix_idx].vector, block);
557 block->irq = 0;
558 }
559 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
560 kvfree(priv->ntfy_blocks);
561 priv->ntfy_blocks = NULL;
562 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
563 sizeof(*priv->irq_db_indices),
564 priv->irq_db_indices, priv->irq_db_indices_bus);
565 priv->irq_db_indices = NULL;
566 pci_disable_msix(priv->pdev);
567 kvfree(priv->msix_vectors);
568 priv->msix_vectors = NULL;
569 }
570
571 static int gve_setup_device_resources(struct gve_priv *priv)
572 {
573 int err;
574
575 err = gve_alloc_flow_rule_caches(priv);
576 if (err)
577 return err;
578 err = gve_alloc_counter_array(priv);
579 if (err)
580 goto abort_with_flow_rule_caches;
581 err = gve_alloc_notify_blocks(priv);
582 if (err)
583 goto abort_with_counter;
584 err = gve_alloc_stats_report(priv);
585 if (err)
586 goto abort_with_ntfy_blocks;
587 err = gve_adminq_configure_device_resources(priv,
588 priv->counter_array_bus,
589 priv->num_event_counters,
590 priv->irq_db_indices_bus,
591 priv->num_ntfy_blks);
592 if (unlikely(err)) {
593 dev_err(&priv->pdev->dev,
594 "could not setup device_resources: err=%d\n", err);
595 err = -ENXIO;
596 goto abort_with_stats_report;
597 }
598
599 if (!gve_is_gqi(priv)) {
600 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
601 GFP_KERNEL);
602 if (!priv->ptype_lut_dqo) {
603 err = -ENOMEM;
604 goto abort_with_stats_report;
605 }
606 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
607 if (err) {
608 dev_err(&priv->pdev->dev,
609 "Failed to get ptype map: err=%d\n", err);
610 goto abort_with_ptype_lut;
611 }
612 }
613
614 err = gve_adminq_report_stats(priv, priv->stats_report_len,
615 priv->stats_report_bus,
616 GVE_STATS_REPORT_TIMER_PERIOD);
617 if (err)
618 dev_err(&priv->pdev->dev,
619 "Failed to report stats: err=%d\n", err);
620 gve_set_device_resources_ok(priv);
621 return 0;
622
623 abort_with_ptype_lut:
624 kvfree(priv->ptype_lut_dqo);
625 priv->ptype_lut_dqo = NULL;
626 abort_with_stats_report:
627 gve_free_stats_report(priv);
628 abort_with_ntfy_blocks:
629 gve_free_notify_blocks(priv);
630 abort_with_counter:
631 gve_free_counter_array(priv);
632 abort_with_flow_rule_caches:
633 gve_free_flow_rule_caches(priv);
634
635 return err;
636 }
637
638 static void gve_trigger_reset(struct gve_priv *priv);
639
640 static void gve_teardown_device_resources(struct gve_priv *priv)
641 {
642 int err;
643
644 /* Tell device its resources are being freed */
645 if (gve_get_device_resources_ok(priv)) {
646 err = gve_flow_rules_reset(priv);
647 if (err) {
648 dev_err(&priv->pdev->dev,
649 "Failed to reset flow rules: err=%d\n", err);
650 gve_trigger_reset(priv);
651 }
652 /* detach the stats report */
653 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
654 if (err) {
655 dev_err(&priv->pdev->dev,
656 "Failed to detach stats report: err=%d\n", err);
657 gve_trigger_reset(priv);
658 }
659 err = gve_adminq_deconfigure_device_resources(priv);
660 if (err) {
661 dev_err(&priv->pdev->dev,
662 "Could not deconfigure device resources: err=%d\n",
663 err);
664 gve_trigger_reset(priv);
665 }
666 }
667
668 kvfree(priv->ptype_lut_dqo);
669 priv->ptype_lut_dqo = NULL;
670
671 gve_free_flow_rule_caches(priv);
672 gve_free_counter_array(priv);
673 gve_free_notify_blocks(priv);
674 gve_free_stats_report(priv);
675 gve_clear_device_resources_ok(priv);
676 }
677
678 static int gve_unregister_qpl(struct gve_priv *priv,
679 struct gve_queue_page_list *qpl)
680 {
681 int err;
682
683 if (!qpl)
684 return 0;
685
686 err = gve_adminq_unregister_page_list(priv, qpl->id);
687 if (err) {
688 netif_err(priv, drv, priv->dev,
689 "Failed to unregister queue page list %d\n",
690 qpl->id);
691 return err;
692 }
693
694 priv->num_registered_pages -= qpl->num_entries;
695 return 0;
696 }
697
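/* Register a queue page list with the device, enforcing the device-advertised
 * cap on total registered pages; the running count in
 * priv->num_registered_pages is only updated on success.
 */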
698 static int gve_register_qpl(struct gve_priv *priv,
699 struct gve_queue_page_list *qpl)
700 {
701 int pages;
702 int err;
703
704 if (!qpl)
705 return 0;
706
707 pages = qpl->num_entries;
708
709 if (pages + priv->num_registered_pages > priv->max_registered_pages) {
710 netif_err(priv, drv, priv->dev,
711 "Reached max number of registered pages %llu > %llu\n",
712 pages + priv->num_registered_pages,
713 priv->max_registered_pages);
714 return -EINVAL;
715 }
716
717 err = gve_adminq_register_page_list(priv, qpl);
718 if (err) {
719 netif_err(priv, drv, priv->dev,
720 "failed to register queue page list %d\n",
721 qpl->id);
722 return err;
723 }
724
725 priv->num_registered_pages += pages;
726 return 0;
727 }
728
729 static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
730 {
731 struct gve_tx_ring *tx = &priv->tx[idx];
732
733 if (gve_is_gqi(priv))
734 return tx->tx_fifo.qpl;
735 else
736 return tx->dqo.qpl;
737 }
738
739 static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
740 {
741 struct gve_rx_ring *rx = &priv->rx[idx];
742
743 if (gve_is_gqi(priv))
744 return rx->data.qpl;
745 else
746 return rx->dqo.qpl;
747 }
748
749 static int gve_register_xdp_qpls(struct gve_priv *priv)
750 {
751 int start_id;
752 int err;
753 int i;
754
755 start_id = gve_xdp_tx_start_queue_id(priv);
756 for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
757 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
758 /* This failure will trigger a reset - no need to clean up */
759 if (err)
760 return err;
761 }
762 return 0;
763 }
764
765 static int gve_register_qpls(struct gve_priv *priv)
766 {
767 int num_tx_qpls, num_rx_qpls;
768 int err;
769 int i;
770
771 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
772 gve_is_qpl(priv));
773 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
774
775 for (i = 0; i < num_tx_qpls; i++) {
776 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
777 if (err)
778 return err;
779 }
780
781 for (i = 0; i < num_rx_qpls; i++) {
782 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
783 if (err)
784 return err;
785 }
786
787 return 0;
788 }
789
790 static int gve_unregister_xdp_qpls(struct gve_priv *priv)
791 {
792 int start_id;
793 int err;
794 int i;
795
796 start_id = gve_xdp_tx_start_queue_id(priv);
797 for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
798 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
799 /* This failure will trigger a reset - no need to clean */
800 if (err)
801 return err;
802 }
803 return 0;
804 }
805
806 static int gve_unregister_qpls(struct gve_priv *priv)
807 {
808 int num_tx_qpls, num_rx_qpls;
809 int err;
810 int i;
811
812 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
813 gve_is_qpl(priv));
814 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
815
816 for (i = 0; i < num_tx_qpls; i++) {
817 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
818 /* This failure will trigger a reset - no need to clean */
819 if (err)
820 return err;
821 }
822
823 for (i = 0; i < num_rx_qpls; i++) {
824 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
825 /* This failure will trigger a reset - no need to clean */
826 if (err)
827 return err;
828 }
829 return 0;
830 }
831
832 static int gve_create_xdp_rings(struct gve_priv *priv)
833 {
834 int err;
835
836 err = gve_adminq_create_tx_queues(priv,
837 gve_xdp_tx_start_queue_id(priv),
838 priv->num_xdp_queues);
839 if (err) {
840 netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
841 priv->num_xdp_queues);
842 /* This failure will trigger a reset - no need to clean
843 * up
844 */
845 return err;
846 }
847 netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
848 priv->num_xdp_queues);
849
850 return 0;
851 }
852
853 static int gve_create_rings(struct gve_priv *priv)
854 {
855 int num_tx_queues = gve_num_tx_queues(priv);
856 int err;
857 int i;
858
859 err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
860 if (err) {
861 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
862 num_tx_queues);
863 /* This failure will trigger a reset - no need to clean
864 * up
865 */
866 return err;
867 }
868 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
869 num_tx_queues);
870
871 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
872 if (err) {
873 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
874 priv->rx_cfg.num_queues);
875 /* This failure will trigger a reset - no need to clean
876 * up
877 */
878 return err;
879 }
880 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
881 priv->rx_cfg.num_queues);
882
883 if (gve_is_gqi(priv)) {
884 /* Rx data ring has been prefilled with packet buffers at queue
885 * allocation time.
886 *
887 * Write the doorbell to provide descriptor slots and packet
888 * buffers to the NIC.
889 */
890 for (i = 0; i < priv->rx_cfg.num_queues; i++)
891 gve_rx_write_doorbell(priv, &priv->rx[i]);
892 } else {
893 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
894 /* Post buffers and ring doorbell. */
895 gve_rx_post_buffers_dqo(&priv->rx[i]);
896 }
897 }
898
899 return 0;
900 }
901
902 static void init_xdp_sync_stats(struct gve_priv *priv)
903 {
904 int start_id = gve_xdp_tx_start_queue_id(priv);
905 int i;
906
907 /* Init stats */
908 for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
909 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
910
911 u64_stats_init(&priv->tx[i].statss);
912 priv->tx[i].ntfy_id = ntfy_idx;
913 }
914 }
915
916 static void gve_init_sync_stats(struct gve_priv *priv)
917 {
918 int i;
919
920 for (i = 0; i < priv->tx_cfg.num_queues; i++)
921 u64_stats_init(&priv->tx[i].statss);
922
923 /* Init stats for XDP TX queues */
924 init_xdp_sync_stats(priv);
925
926 for (i = 0; i < priv->rx_cfg.num_queues; i++)
927 u64_stats_init(&priv->rx[i].statss);
928 }
929
930 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
931 struct gve_tx_alloc_rings_cfg *cfg)
932 {
933 int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
934
935 cfg->qcfg = &priv->tx_cfg;
936 cfg->raw_addressing = !gve_is_qpl(priv);
937 cfg->ring_size = priv->tx_desc_cnt;
938 cfg->start_idx = 0;
939 cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
940 cfg->tx = priv->tx;
941 }
942
943 static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
944 {
945 int i;
946
947 if (!priv->tx)
948 return;
949
950 for (i = start_id; i < start_id + num_rings; i++) {
951 if (gve_is_gqi(priv))
952 gve_tx_stop_ring_gqi(priv, i);
953 else
954 gve_tx_stop_ring_dqo(priv, i);
955 }
956 }
957
958 static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
959 int num_rings)
960 {
961 int i;
962
963 for (i = start_id; i < start_id + num_rings; i++) {
964 if (gve_is_gqi(priv))
965 gve_tx_start_ring_gqi(priv, i);
966 else
967 gve_tx_start_ring_dqo(priv, i);
968 }
969 }
970
971 static int gve_alloc_xdp_rings(struct gve_priv *priv)
972 {
973 struct gve_tx_alloc_rings_cfg cfg = {0};
974 int err = 0;
975
976 if (!priv->num_xdp_queues)
977 return 0;
978
979 gve_tx_get_curr_alloc_cfg(priv, &cfg);
980 cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
981 cfg.num_rings = priv->num_xdp_queues;
982
983 err = gve_tx_alloc_rings_gqi(priv, &cfg);
984 if (err)
985 return err;
986
987 gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
988 init_xdp_sync_stats(priv);
989
990 return 0;
991 }
992
993 static int gve_queues_mem_alloc(struct gve_priv *priv,
994 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
995 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
996 {
997 int err;
998
999 if (gve_is_gqi(priv))
1000 err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
1001 else
1002 err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
1003 if (err)
1004 return err;
1005
1006 if (gve_is_gqi(priv))
1007 err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
1008 else
1009 err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
1010 if (err)
1011 goto free_tx;
1012
1013 return 0;
1014
1015 free_tx:
1016 if (gve_is_gqi(priv))
1017 gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
1018 else
1019 gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
1020 return err;
1021 }
1022
1023 static int gve_destroy_xdp_rings(struct gve_priv *priv)
1024 {
1025 int start_id;
1026 int err;
1027
1028 start_id = gve_xdp_tx_start_queue_id(priv);
1029 err = gve_adminq_destroy_tx_queues(priv,
1030 start_id,
1031 priv->num_xdp_queues);
1032 if (err) {
1033 netif_err(priv, drv, priv->dev,
1034 "failed to destroy XDP queues\n");
1035 /* This failure will trigger a reset - no need to clean up */
1036 return err;
1037 }
1038 netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
1039
1040 return 0;
1041 }
1042
1043 static int gve_destroy_rings(struct gve_priv *priv)
1044 {
1045 int num_tx_queues = gve_num_tx_queues(priv);
1046 int err;
1047
1048 err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
1049 if (err) {
1050 netif_err(priv, drv, priv->dev,
1051 "failed to destroy tx queues\n");
1052 /* This failure will trigger a reset - no need to clean up */
1053 return err;
1054 }
1055 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
1056 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
1057 if (err) {
1058 netif_err(priv, drv, priv->dev,
1059 "failed to destroy rx queues\n");
1060 /* This failure will trigger a reset - no need to clean up */
1061 return err;
1062 }
1063 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
1064 return 0;
1065 }
1066
1067 static void gve_free_xdp_rings(struct gve_priv *priv)
1068 {
1069 struct gve_tx_alloc_rings_cfg cfg = {0};
1070
1071 gve_tx_get_curr_alloc_cfg(priv, &cfg);
1072 cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
1073 cfg.num_rings = priv->num_xdp_queues;
1074
1075 if (priv->tx) {
1076 gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
1077 gve_tx_free_rings_gqi(priv, &cfg);
1078 }
1079 }
1080
1081 static void gve_queues_mem_free(struct gve_priv *priv,
1082 struct gve_tx_alloc_rings_cfg *tx_cfg,
1083 struct gve_rx_alloc_rings_cfg *rx_cfg)
1084 {
1085 if (gve_is_gqi(priv)) {
1086 gve_tx_free_rings_gqi(priv, tx_cfg);
1087 gve_rx_free_rings_gqi(priv, rx_cfg);
1088 } else {
1089 gve_tx_free_rings_dqo(priv, tx_cfg);
1090 gve_rx_free_rings_dqo(priv, rx_cfg);
1091 }
1092 }
1093
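/* Allocate a single page and DMA-map it for the device, bumping the
 * page_alloc_fail / dma_mapping_error counters on the respective failures.
 */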
1094 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1095 struct page **page, dma_addr_t *dma,
1096 enum dma_data_direction dir, gfp_t gfp_flags)
1097 {
1098 *page = alloc_page(gfp_flags);
1099 if (!*page) {
1100 priv->page_alloc_fail++;
1101 return -ENOMEM;
1102 }
1103 *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
1104 if (dma_mapping_error(dev, *dma)) {
1105 priv->dma_mapping_error++;
1106 put_page(*page);
1107 return -ENOMEM;
1108 }
1109 return 0;
1110 }
1111
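/* Allocate a queue page list: the qpl struct, a page array, a matching array
 * of DMA addresses, and the pages themselves. qpl->num_entries only counts
 * successfully mapped pages, so gve_free_queue_page_list() can safely undo a
 * partial allocation on the abort path.
 */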
1112 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1113 u32 id, int pages)
1114 {
1115 struct gve_queue_page_list *qpl;
1116 int err;
1117 int i;
1118
1119 qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
1120 if (!qpl)
1121 return NULL;
1122
1123 qpl->id = id;
1124 qpl->num_entries = 0;
1125 qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
1126 if (!qpl->pages)
1127 goto abort;
1128
1129 qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
1130 if (!qpl->page_buses)
1131 goto abort;
1132
1133 for (i = 0; i < pages; i++) {
1134 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
1135 &qpl->page_buses[i],
1136 gve_qpl_dma_dir(priv, id), GFP_KERNEL);
1137 if (err)
1138 goto abort;
1139 qpl->num_entries++;
1140 }
1141
1142 return qpl;
1143
1144 abort:
1145 gve_free_queue_page_list(priv, qpl, id);
1146 return NULL;
1147 }
1148
1149 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1150 enum dma_data_direction dir)
1151 {
1152 if (!dma_mapping_error(dev, dma))
1153 dma_unmap_page(dev, dma, PAGE_SIZE, dir);
1154 if (page)
1155 put_page(page);
1156 }
1157
1158 void gve_free_queue_page_list(struct gve_priv *priv,
1159 struct gve_queue_page_list *qpl,
1160 u32 id)
1161 {
1162 int i;
1163
1164 if (!qpl)
1165 return;
1166 if (!qpl->pages)
1167 goto free_qpl;
1168 if (!qpl->page_buses)
1169 goto free_pages;
1170
1171 for (i = 0; i < qpl->num_entries; i++)
1172 gve_free_page(&priv->pdev->dev, qpl->pages[i],
1173 qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
1174
1175 kvfree(qpl->page_buses);
1176 qpl->page_buses = NULL;
1177 free_pages:
1178 kvfree(qpl->pages);
1179 qpl->pages = NULL;
1180 free_qpl:
1181 kvfree(qpl);
1182 }
1183
1184 /* Use this to schedule a reset when the device is capable of continuing
1185 * to handle other requests in its current state. If it is not, do a reset
1186 * in thread instead.
1187 */
1188 void gve_schedule_reset(struct gve_priv *priv)
1189 {
1190 gve_set_do_reset(priv);
1191 queue_work(priv->gve_wq, &priv->service_task);
1192 }
1193
1194 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
1195 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1196 static void gve_turndown(struct gve_priv *priv);
1197 static void gve_turnup(struct gve_priv *priv);
1198
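/* When XDP queues exist, register an xdp_rxq_info (page-shared memory model)
 * for every RX ring and, if an XSK pool is attached to that queue id, a
 * second rxq backed by MEM_TYPE_XSK_BUFF_POOL; the XDP TX rings are then
 * pointed at their pools. On failure, everything registered so far is
 * unwound.
 */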
1199 static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
1200 {
1201 struct napi_struct *napi;
1202 struct gve_rx_ring *rx;
1203 int err = 0;
1204 int i, j;
1205 u32 tx_qid;
1206
1207 if (!priv->num_xdp_queues)
1208 return 0;
1209
1210 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1211 rx = &priv->rx[i];
1212 napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1213
1214 err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
1215 napi->napi_id);
1216 if (err)
1217 goto err;
1218 err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1219 MEM_TYPE_PAGE_SHARED, NULL);
1220 if (err)
1221 goto err;
1222 rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
1223 if (rx->xsk_pool) {
1224 err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
1225 napi->napi_id);
1226 if (err)
1227 goto err;
1228 err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1229 MEM_TYPE_XSK_BUFF_POOL, NULL);
1230 if (err)
1231 goto err;
1232 xsk_pool_set_rxq_info(rx->xsk_pool,
1233 &rx->xsk_rxq);
1234 }
1235 }
1236
1237 for (i = 0; i < priv->num_xdp_queues; i++) {
1238 tx_qid = gve_xdp_tx_queue_id(priv, i);
1239 priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
1240 }
1241 return 0;
1242
1243 err:
1244 for (j = i; j >= 0; j--) {
1245 rx = &priv->rx[j];
1246 if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1247 xdp_rxq_info_unreg(&rx->xdp_rxq);
1248 if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1249 xdp_rxq_info_unreg(&rx->xsk_rxq);
1250 }
1251 return err;
1252 }
1253
1254 static void gve_unreg_xdp_info(struct gve_priv *priv)
1255 {
1256 int i, tx_qid;
1257
1258 if (!priv->num_xdp_queues)
1259 return;
1260
1261 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1262 struct gve_rx_ring *rx = &priv->rx[i];
1263
1264 xdp_rxq_info_unreg(&rx->xdp_rxq);
1265 if (rx->xsk_pool) {
1266 xdp_rxq_info_unreg(&rx->xsk_rxq);
1267 rx->xsk_pool = NULL;
1268 }
1269 }
1270
1271 for (i = 0; i < priv->num_xdp_queues; i++) {
1272 tx_qid = gve_xdp_tx_queue_id(priv, i);
1273 priv->tx[tx_qid].xsk_pool = NULL;
1274 }
1275 }
1276
1277 static void gve_drain_page_cache(struct gve_priv *priv)
1278 {
1279 int i;
1280
1281 for (i = 0; i < priv->rx_cfg.num_queues; i++)
1282 page_frag_cache_drain(&priv->rx[i].page_cache);
1283 }
1284
1285 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
1286 struct gve_rx_alloc_rings_cfg *cfg)
1287 {
1288 cfg->qcfg = &priv->rx_cfg;
1289 cfg->qcfg_tx = &priv->tx_cfg;
1290 cfg->raw_addressing = !gve_is_qpl(priv);
1291 cfg->enable_header_split = priv->header_split_enabled;
1292 cfg->ring_size = priv->rx_desc_cnt;
1293 cfg->packet_buffer_size = gve_is_gqi(priv) ?
1294 GVE_DEFAULT_RX_BUFFER_SIZE :
1295 priv->data_buffer_size_dqo;
1296 cfg->rx = priv->rx;
1297 }
1298
1299 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1300 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1301 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1302 {
1303 gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
1304 gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
1305 }
1306
1307 static void gve_rx_start_ring(struct gve_priv *priv, int i)
1308 {
1309 if (gve_is_gqi(priv))
1310 gve_rx_start_ring_gqi(priv, i);
1311 else
1312 gve_rx_start_ring_dqo(priv, i);
1313 }
1314
1315 static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
1316 {
1317 int i;
1318
1319 for (i = 0; i < num_rings; i++)
1320 gve_rx_start_ring(priv, i);
1321 }
1322
1323 static void gve_rx_stop_ring(struct gve_priv *priv, int i)
1324 {
1325 if (gve_is_gqi(priv))
1326 gve_rx_stop_ring_gqi(priv, i);
1327 else
1328 gve_rx_stop_ring_dqo(priv, i);
1329 }
1330
1331 static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
1332 {
1333 int i;
1334
1335 if (!priv->rx)
1336 return;
1337
1338 for (i = 0; i < num_rings; i++)
1339 gve_rx_stop_ring(priv, i);
1340 }
1341
1342 static void gve_queues_mem_remove(struct gve_priv *priv)
1343 {
1344 struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1345 struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1346
1347 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1348 gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1349 priv->tx = NULL;
1350 priv->rx = NULL;
1351 }
1352
1353 /* The passed-in queue memory is stored into priv and the queues are made live.
1354 * No memory is allocated. Passed-in memory is freed on errors.
1355 */
1356 static int gve_queues_start(struct gve_priv *priv,
1357 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1358 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1359 {
1360 struct net_device *dev = priv->dev;
1361 int err;
1362
1363 /* Record new resources into priv */
1364 priv->tx = tx_alloc_cfg->tx;
1365 priv->rx = rx_alloc_cfg->rx;
1366
1367 /* Record new configs into priv */
1368 priv->tx_cfg = *tx_alloc_cfg->qcfg;
1369 priv->rx_cfg = *rx_alloc_cfg->qcfg;
1370 priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
1371 priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
1372
1373 if (priv->xdp_prog)
1374 priv->num_xdp_queues = priv->rx_cfg.num_queues;
1375 else
1376 priv->num_xdp_queues = 0;
1377
1378 gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
1379 gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
1380 gve_init_sync_stats(priv);
1381
1382 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1383 if (err)
1384 goto stop_and_free_rings;
1385 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1386 if (err)
1387 goto stop_and_free_rings;
1388
1389 err = gve_reg_xdp_info(priv, dev);
1390 if (err)
1391 goto stop_and_free_rings;
1392
1393 err = gve_register_qpls(priv);
1394 if (err)
1395 goto reset;
1396
1397 priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
1398 priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
1399
1400 err = gve_create_rings(priv);
1401 if (err)
1402 goto reset;
1403
1404 gve_set_device_rings_ok(priv);
1405
1406 if (gve_get_report_stats(priv))
1407 mod_timer(&priv->stats_report_timer,
1408 round_jiffies(jiffies +
1409 msecs_to_jiffies(priv->stats_report_timer_period)));
1410
1411 gve_turnup(priv);
1412 queue_work(priv->gve_wq, &priv->service_task);
1413 priv->interface_up_cnt++;
1414 return 0;
1415
1416 reset:
1417 if (gve_get_reset_in_progress(priv))
1418 goto stop_and_free_rings;
1419 gve_reset_and_teardown(priv, true);
1420 /* if this fails there is nothing we can do so just ignore the return */
1421 gve_reset_recovery(priv, false);
1422 /* return the original error */
1423 return err;
1424 stop_and_free_rings:
1425 gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
1426 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1427 gve_queues_mem_remove(priv);
1428 return err;
1429 }
1430
1431 static int gve_open(struct net_device *dev)
1432 {
1433 struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1434 struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1435 struct gve_priv *priv = netdev_priv(dev);
1436 int err;
1437
1438 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1439
1440 err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1441 if (err)
1442 return err;
1443
1444 /* No need to free on error: ownership of resources is lost after
1445 * calling gve_queues_start.
1446 */
1447 err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1448 if (err)
1449 return err;
1450
1451 return 0;
1452 }
1453
1454 static int gve_queues_stop(struct gve_priv *priv)
1455 {
1456 int err;
1457
1458 netif_carrier_off(priv->dev);
1459 if (gve_get_device_rings_ok(priv)) {
1460 gve_turndown(priv);
1461 gve_drain_page_cache(priv);
1462 err = gve_destroy_rings(priv);
1463 if (err)
1464 goto err;
1465 err = gve_unregister_qpls(priv);
1466 if (err)
1467 goto err;
1468 gve_clear_device_rings_ok(priv);
1469 }
1470 del_timer_sync(&priv->stats_report_timer);
1471
1472 gve_unreg_xdp_info(priv);
1473
1474 gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
1475 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1476
1477 priv->interface_down_cnt++;
1478 return 0;
1479
1480 err:
1481 /* This must have been called from a reset due to the rtnl lock
1482 * so just return at this point.
1483 */
1484 if (gve_get_reset_in_progress(priv))
1485 return err;
1486 /* Otherwise reset before returning */
1487 gve_reset_and_teardown(priv, true);
1488 return gve_reset_recovery(priv, false);
1489 }
1490
1491 static int gve_close(struct net_device *dev)
1492 {
1493 struct gve_priv *priv = netdev_priv(dev);
1494 int err;
1495
1496 err = gve_queues_stop(priv);
1497 if (err)
1498 return err;
1499
1500 gve_queues_mem_remove(priv);
1501 return 0;
1502 }
1503
1504 static int gve_remove_xdp_queues(struct gve_priv *priv)
1505 {
1506 int err;
1507
1508 err = gve_destroy_xdp_rings(priv);
1509 if (err)
1510 return err;
1511
1512 err = gve_unregister_xdp_qpls(priv);
1513 if (err)
1514 return err;
1515
1516 gve_unreg_xdp_info(priv);
1517 gve_free_xdp_rings(priv);
1518
1519 priv->num_xdp_queues = 0;
1520 return 0;
1521 }
1522
1523 static int gve_add_xdp_queues(struct gve_priv *priv)
1524 {
1525 int err;
1526
1527 priv->num_xdp_queues = priv->rx_cfg.num_queues;
1528
1529 err = gve_alloc_xdp_rings(priv);
1530 if (err)
1531 goto err;
1532
1533 err = gve_reg_xdp_info(priv, priv->dev);
1534 if (err)
1535 goto free_xdp_rings;
1536
1537 err = gve_register_xdp_qpls(priv);
1538 if (err)
1539 goto free_xdp_rings;
1540
1541 err = gve_create_xdp_rings(priv);
1542 if (err)
1543 goto free_xdp_rings;
1544
1545 return 0;
1546
1547 free_xdp_rings:
1548 gve_free_xdp_rings(priv);
1549 err:
1550 priv->num_xdp_queues = 0;
1551 return err;
1552 }
1553
1554 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1555 {
1556 if (!gve_get_napi_enabled(priv))
1557 return;
1558
1559 if (link_status == netif_carrier_ok(priv->dev))
1560 return;
1561
1562 if (link_status) {
1563 netdev_info(priv->dev, "Device link is up.\n");
1564 netif_carrier_on(priv->dev);
1565 } else {
1566 netdev_info(priv->dev, "Device link is down.\n");
1567 netif_carrier_off(priv->dev);
1568 }
1569 }
1570
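/* Install or remove an XDP program. If the interface is down the pointer is
 * simply swapped; otherwise the queues are turned down, dedicated XDP TX
 * queues are added or removed to match, and traffic is turned back up with a
 * fresh link-status check.
 */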
1571 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
1572 struct netlink_ext_ack *extack)
1573 {
1574 struct bpf_prog *old_prog;
1575 int err = 0;
1576 u32 status;
1577
1578 old_prog = READ_ONCE(priv->xdp_prog);
1579 if (!netif_running(priv->dev)) {
1580 WRITE_ONCE(priv->xdp_prog, prog);
1581 if (old_prog)
1582 bpf_prog_put(old_prog);
1583 return 0;
1584 }
1585
1586 gve_turndown(priv);
1587 if (!old_prog && prog) {
1588 // Allocate XDP TX queues if an XDP program is
1589 // being installed
1590 err = gve_add_xdp_queues(priv);
1591 if (err)
1592 goto out;
1593 } else if (old_prog && !prog) {
1594 // Remove XDP TX queues if an XDP program is
1595 // being uninstalled
1596 err = gve_remove_xdp_queues(priv);
1597 if (err)
1598 goto out;
1599 }
1600 WRITE_ONCE(priv->xdp_prog, prog);
1601 if (old_prog)
1602 bpf_prog_put(old_prog);
1603
1604 out:
1605 gve_turnup(priv);
1606 status = ioread32be(&priv->reg_bar0->device_status);
1607 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1608 return err;
1609 }
1610
1611 static int gve_xsk_pool_enable(struct net_device *dev,
1612 struct xsk_buff_pool *pool,
1613 u16 qid)
1614 {
1615 struct gve_priv *priv = netdev_priv(dev);
1616 struct napi_struct *napi;
1617 struct gve_rx_ring *rx;
1618 int tx_qid;
1619 int err;
1620
1621 if (qid >= priv->rx_cfg.num_queues) {
1622 dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
1623 return -EINVAL;
1624 }
1625 if (xsk_pool_get_rx_frame_size(pool) <
1626 priv->dev->max_mtu + sizeof(struct ethhdr)) {
1627 dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
1628 return -EINVAL;
1629 }
1630
1631 err = xsk_pool_dma_map(pool, &priv->pdev->dev,
1632 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1633 if (err)
1634 return err;
1635
1636 /* If XDP prog is not installed or interface is down, return. */
1637 if (!priv->xdp_prog || !netif_running(dev))
1638 return 0;
1639
1640 rx = &priv->rx[qid];
1641 napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1642 err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
1643 if (err)
1644 goto err;
1645
1646 err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1647 MEM_TYPE_XSK_BUFF_POOL, NULL);
1648 if (err)
1649 goto err;
1650
1651 xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
1652 rx->xsk_pool = pool;
1653
1654 tx_qid = gve_xdp_tx_queue_id(priv, qid);
1655 priv->tx[tx_qid].xsk_pool = pool;
1656
1657 return 0;
1658 err:
1659 if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1660 xdp_rxq_info_unreg(&rx->xsk_rxq);
1661
1662 xsk_pool_dma_unmap(pool,
1663 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1664 return err;
1665 }
1666
1667 static int gve_xsk_pool_disable(struct net_device *dev,
1668 u16 qid)
1669 {
1670 struct gve_priv *priv = netdev_priv(dev);
1671 struct napi_struct *napi_rx;
1672 struct napi_struct *napi_tx;
1673 struct xsk_buff_pool *pool;
1674 int tx_qid;
1675
1676 pool = xsk_get_pool_from_qid(dev, qid);
1677 if (!pool)
1678 return -EINVAL;
1679 if (qid >= priv->rx_cfg.num_queues)
1680 return -EINVAL;
1681
1682 /* If XDP prog is not installed or interface is down, unmap DMA and
1683 * return.
1684 */
1685 if (!priv->xdp_prog || !netif_running(dev))
1686 goto done;
1687
1688 napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
1689 napi_disable(napi_rx); /* make sure current rx poll is done */
1690
1691 tx_qid = gve_xdp_tx_queue_id(priv, qid);
1692 napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
1693 napi_disable(napi_tx); /* make sure current tx poll is done */
1694
1695 priv->rx[qid].xsk_pool = NULL;
1696 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1697 priv->tx[tx_qid].xsk_pool = NULL;
1698 smp_mb(); /* Make sure it is visible to the workers on datapath */
1699
1700 napi_enable(napi_rx);
1701 if (gve_rx_work_pending(&priv->rx[qid]))
1702 napi_schedule(napi_rx);
1703
1704 napi_enable(napi_tx);
1705 if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
1706 napi_schedule(napi_tx);
1707
1708 done:
1709 xsk_pool_dma_unmap(pool,
1710 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1711 return 0;
1712 }
1713
1714 static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
1715 {
1716 struct gve_priv *priv = netdev_priv(dev);
1717 struct napi_struct *napi;
1718
1719 if (!gve_get_napi_enabled(priv))
1720 return -ENETDOWN;
1721
1722 if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
1723 return -EINVAL;
1724
1725 napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
1726 if (!napi_if_scheduled_mark_missed(napi)) {
1727 /* Call local_bh_enable to trigger SoftIRQ processing */
1728 local_bh_disable();
1729 napi_schedule(napi);
1730 local_bh_enable();
1731 }
1732
1733 return 0;
1734 }
1735
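/* XDP can only be attached when LRO is off, the device is in GQI-QPL queue
 * format, the MTU fits a default RX buffer (minus the Ethernet header and RX
 * pad), and the RX queue count equals the TX queue count with room left for
 * an equal number of dedicated XDP TX queues.
 */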
1736 static int verify_xdp_configuration(struct net_device *dev)
1737 {
1738 struct gve_priv *priv = netdev_priv(dev);
1739
1740 if (dev->features & NETIF_F_LRO) {
1741 netdev_warn(dev, "XDP is not supported when LRO is on.\n");
1742 return -EOPNOTSUPP;
1743 }
1744
1745 if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
1746 netdev_warn(dev, "XDP is not supported in mode %d.\n",
1747 priv->queue_format);
1748 return -EOPNOTSUPP;
1749 }
1750
1751 if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
1752 netdev_warn(dev, "XDP is not supported for mtu %d.\n",
1753 dev->mtu);
1754 return -EOPNOTSUPP;
1755 }
1756
1757 if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
1758 (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
1759 netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
1760 priv->rx_cfg.num_queues,
1761 priv->tx_cfg.num_queues,
1762 priv->tx_cfg.max_queues);
1763 return -EINVAL;
1764 }
1765 return 0;
1766 }
1767
1768 static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1769 {
1770 struct gve_priv *priv = netdev_priv(dev);
1771 int err;
1772
1773 err = verify_xdp_configuration(dev);
1774 if (err)
1775 return err;
1776 switch (xdp->command) {
1777 case XDP_SETUP_PROG:
1778 return gve_set_xdp(priv, xdp->prog, xdp->extack);
1779 case XDP_SETUP_XSK_POOL:
1780 if (xdp->xsk.pool)
1781 return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1782 else
1783 return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
1784 default:
1785 return -EINVAL;
1786 }
1787 }
1788
1789 int gve_flow_rules_reset(struct gve_priv *priv)
1790 {
1791 if (!priv->max_flow_rules)
1792 return 0;
1793
1794 return gve_adminq_reset_flow_rules(priv);
1795 }
1796
1797 int gve_adjust_config(struct gve_priv *priv,
1798 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1799 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1800 {
1801 int err;
1802
1803 /* Allocate resources for the new configuration */
1804 err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
1805 if (err) {
1806 netif_err(priv, drv, priv->dev,
1807 "Adjust config failed to alloc new queues");
1808 return err;
1809 }
1810
1811 /* Teardown the device and free existing resources */
1812 err = gve_close(priv->dev);
1813 if (err) {
1814 netif_err(priv, drv, priv->dev,
1815 "Adjust config failed to close old queues");
1816 gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
1817 return err;
1818 }
1819
1820 /* Bring the device back up again with the new resources. */
1821 err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
1822 if (err) {
1823 netif_err(priv, drv, priv->dev,
1824 "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
1825 /* No need to free on error: ownership of resources is lost after
1826 * calling gve_queues_start.
1827 */
1828 gve_turndown(priv);
1829 return err;
1830 }
1831
1832 return 0;
1833 }
1834
1835 int gve_adjust_queues(struct gve_priv *priv,
1836 struct gve_queue_config new_rx_config,
1837 struct gve_queue_config new_tx_config)
1838 {
1839 struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1840 struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1841 int num_xdp_queues;
1842 int err;
1843
1844 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1845
1846 /* Relay the new config from ethtool */
1847 tx_alloc_cfg.qcfg = &new_tx_config;
1848 rx_alloc_cfg.qcfg_tx = &new_tx_config;
1849 rx_alloc_cfg.qcfg = &new_rx_config;
1850 tx_alloc_cfg.num_rings = new_tx_config.num_queues;
1851
1852 /* Add dedicated XDP TX queues if enabled. */
1853 num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
1854 tx_alloc_cfg.num_rings += num_xdp_queues;
1855
1856 if (netif_running(priv->dev)) {
1857 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1858 return err;
1859 }
1860 /* Set the config for the next up. */
1861 priv->tx_cfg = new_tx_config;
1862 priv->rx_cfg = new_rx_config;
1863
1864 return 0;
1865 }
1866
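/* Quiesce the device: drop carrier, disable NAPI on every TX and RX notify
 * block, stop the TX queues, clear the XDP redirect target, and finally
 * synchronize_net() so in-flight datapath work drains before queues are
 * reconfigured or torn down.
 */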
1867 static void gve_turndown(struct gve_priv *priv)
1868 {
1869 int idx;
1870
1871 if (netif_carrier_ok(priv->dev))
1872 netif_carrier_off(priv->dev);
1873
1874 if (!gve_get_napi_enabled(priv))
1875 return;
1876
1877 /* Disable napi to prevent more work from coming in */
1878 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1879 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1880 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1881
1882 if (!gve_tx_was_added_to_block(priv, idx))
1883 continue;
1884 napi_disable(&block->napi);
1885 }
1886 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1887 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1888 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1889
1890 if (!gve_rx_was_added_to_block(priv, idx))
1891 continue;
1892 napi_disable(&block->napi);
1893 }
1894
1895 /* Stop tx queues */
1896 netif_tx_disable(priv->dev);
1897
1898 xdp_features_clear_redirect_target(priv->dev);
1899
1900 gve_clear_napi_enabled(priv);
1901 gve_clear_report_stats(priv);
1902
1903 /* Make sure that all traffic is finished processing. */
1904 synchronize_net();
1905 }
1906
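/* Reverse of gve_turndown(): restart the TX queues, re-enable NAPI and
 * re-arm interrupts for every queue (doorbell write on GQI, ITR programming
 * on DQO), and schedule a one-off NAPI pass to catch descriptors that
 * arrived while interrupts were masked.
 */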
1907 static void gve_turnup(struct gve_priv *priv)
1908 {
1909 int idx;
1910
1911 /* Start the tx queues */
1912 netif_tx_start_all_queues(priv->dev);
1913
1914 /* Enable napi and unmask interrupts for all queues */
1915 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1916 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1917 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1918
1919 if (!gve_tx_was_added_to_block(priv, idx))
1920 continue;
1921
1922 napi_enable(&block->napi);
1923 if (gve_is_gqi(priv)) {
1924 iowrite32be(0, gve_irq_doorbell(priv, block));
1925 } else {
1926 gve_set_itr_coalesce_usecs_dqo(priv, block,
1927 priv->tx_coalesce_usecs);
1928 }
1929
1930 /* Any descs written by the NIC before this barrier will be
1931 * handled by the one-off napi schedule below. Whereas any
1932 * descs after the barrier will generate interrupts.
1933 */
1934 mb();
1935 napi_schedule(&block->napi);
1936 }
1937 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1938 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1939 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1940
1941 if (!gve_rx_was_added_to_block(priv, idx))
1942 continue;
1943
1944 napi_enable(&block->napi);
1945 if (gve_is_gqi(priv)) {
1946 iowrite32be(0, gve_irq_doorbell(priv, block));
1947 } else {
1948 gve_set_itr_coalesce_usecs_dqo(priv, block,
1949 priv->rx_coalesce_usecs);
1950 }
1951
1952 /* Any descs written by the NIC before this barrier will be
1953 * handled by the one-off napi schedule below. Whereas any
1954 * descs after the barrier will generate interrupts.
1955 */
1956 mb();
1957 napi_schedule(&block->napi);
1958 }
1959
1960 if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
1961 xdp_features_set_redirect_target(priv->dev, false);
1962
1963 gve_set_napi_enabled(priv);
1964 }
1965
1966 static void gve_turnup_and_check_status(struct gve_priv *priv)
1967 {
1968 u32 status;
1969
1970 gve_turnup(priv);
1971 status = ioread32be(&priv->reg_bar0->device_status);
1972 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1973 }
1974
1975 static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
1976 unsigned int txqueue)
1977 {
1978 u32 ntfy_idx;
1979
1980 if (txqueue > priv->tx_cfg.num_queues)
1981 return NULL;
1982
1983 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
1984 if (ntfy_idx >= priv->num_ntfy_blks)
1985 return NULL;
1986
1987 return &priv->ntfy_blocks[ntfy_idx];
1988 }
1989
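/* Rate-limited recovery attempt for a stalled TX queue: reschedule its NAPI
 * context, but no more than once per MIN_TX_TIMEOUT_GAP per queue. Returns
 * false if the queue cannot be found or was kicked too recently, in which
 * case the caller falls back to a full reset.
 */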
1990 static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
1991 unsigned int txqueue)
1992 {
1993 struct gve_notify_block *block;
1994 u32 current_time;
1995
1996 block = gve_get_tx_notify_block(priv, txqueue);
1997
1998 if (!block)
1999 return false;
2000
2001 current_time = jiffies_to_msecs(jiffies);
2002 if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
2003 return false;
2004
2005 netdev_info(priv->dev, "Kicking queue %d", txqueue);
2006 napi_schedule(&block->napi);
2007 block->tx->last_kick_msec = current_time;
2008 return true;
2009 }
2010
2011 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
2012 {
2013 struct gve_notify_block *block;
2014 struct gve_priv *priv;
2015
2016 netdev_info(dev, "Timeout on tx queue %d", txqueue);
2017 priv = netdev_priv(dev);
2018
2019 if (!gve_tx_timeout_try_q_kick(priv, txqueue))
2020 gve_schedule_reset(priv);
2021
2022 block = gve_get_tx_notify_block(priv, txqueue);
2023 if (block)
2024 block->tx->queue_timeout++;
2025 priv->tx_timeo_cnt++;
2026 }
2027
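/* Choose the RX packet buffer size: the maximum buffer is used only when
 * header split is requested and the device supports buffers that large,
 * otherwise the default size is kept.
 */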
2028 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
2029 {
2030 if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
2031 return GVE_MAX_RX_BUFFER_SIZE;
2032 else
2033 return GVE_DEFAULT_RX_BUFFER_SIZE;
2034 }
2035
2036 /* header-split is not supported on non-DQO_RDA yet even if device advertises it */
2037 bool gve_header_split_supported(const struct gve_priv *priv)
2038 {
2039 return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
2040 }
2041
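/* Ethtool tcp-data-split control. A change in the header-split setting
 * rebuilds the RX allocation config with the matching packet buffer size
 * and, when the interface is up, applies it via gve_adjust_config().
 */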
2042 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
2043 {
2044 struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
2045 struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
2046 bool enable_hdr_split;
2047 int err = 0;
2048
2049 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
2050 return 0;
2051
2052 if (!gve_header_split_supported(priv)) {
2053 dev_err(&priv->pdev->dev, "Header-split not supported\n");
2054 return -EOPNOTSUPP;
2055 }
2056
2057 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
2058 enable_hdr_split = true;
2059 else
2060 enable_hdr_split = false;
2061
2062 if (enable_hdr_split == priv->header_split_enabled)
2063 return 0;
2064
2065 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2066
2067 rx_alloc_cfg.enable_header_split = enable_hdr_split;
2068 rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
2069
2070 if (netif_running(priv->dev))
2071 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2072 return err;
2073 }
2074
2075 static int gve_set_features(struct net_device *netdev,
2076 netdev_features_t features)
2077 {
2078 const netdev_features_t orig_features = netdev->features;
2079 struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
2080 struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
2081 struct gve_priv *priv = netdev_priv(netdev);
2082 int err;
2083
2084 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2085
2086 if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
2087 netdev->features ^= NETIF_F_LRO;
2088 if (netif_running(netdev)) {
2089 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2090 if (err)
2091 goto revert_features;
2092 }
2093 }
2094 if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
2095 err = gve_flow_rules_reset(priv);
2096 if (err)
2097 goto revert_features;
2098 }
2099
2100 return 0;
2101
2102 revert_features:
2103 netdev->features = orig_features;
2104 return err;
2105 }
2106
2107 static const struct net_device_ops gve_netdev_ops = {
2108 .ndo_start_xmit = gve_start_xmit,
2109 .ndo_features_check = gve_features_check,
2110 .ndo_open = gve_open,
2111 .ndo_stop = gve_close,
2112 .ndo_get_stats64 = gve_get_stats,
2113 .ndo_tx_timeout = gve_tx_timeout,
2114 .ndo_set_features = gve_set_features,
2115 .ndo_bpf = gve_xdp,
2116 .ndo_xdp_xmit = gve_xdp_xmit,
2117 .ndo_xsk_wakeup = gve_xsk_wakeup,
2118 };
2119
2120 static void gve_handle_status(struct gve_priv *priv, u32 status)
2121 {
2122 if (GVE_DEVICE_STATUS_RESET_MASK & status) {
2123 dev_info(&priv->pdev->dev, "Device requested reset.\n");
2124 gve_set_do_reset(priv);
2125 }
2126 if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
2127 priv->stats_report_trigger_cnt++;
2128 gve_set_do_report_stats(priv);
2129 }
2130 }
2131
2132 static void gve_handle_reset(struct gve_priv *priv)
2133 {
2134 /* A service task will be scheduled at the end of probe to catch any
2135 * resets that need to happen, and we don't want to reset until
2136 * probe is done.
2137 */
2138 if (gve_get_probe_in_progress(priv))
2139 return;
2140
2141 if (gve_get_do_reset(priv)) {
2142 rtnl_lock();
2143 gve_reset(priv, false);
2144 rtnl_unlock();
2145 }
2146 }
2147
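/* Fill in the stats report shared with the device: per-TX-queue wake/stop
 * counts, frames, bytes, last completion and timeout counters (frame and
 * completion values are GQI-only for now), followed by the expected
 * sequence number and posted buffer count for each RX queue.
 */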
2148 void gve_handle_report_stats(struct gve_priv *priv)
2149 {
2150 struct stats *stats = priv->stats_report->stats;
2151 int idx, stats_idx = 0;
2152 unsigned int start = 0;
2153 u64 tx_bytes;
2154
2155 if (!gve_get_report_stats(priv))
2156 return;
2157
2158 be64_add_cpu(&priv->stats_report->written_count, 1);
2159 /* tx stats */
2160 if (priv->tx) {
2161 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
2162 u32 last_completion = 0;
2163 u32 tx_frames = 0;
2164
2165 /* DQO doesn't currently support these metrics. */
2166 if (gve_is_gqi(priv)) {
2167 last_completion = priv->tx[idx].done;
2168 tx_frames = priv->tx[idx].req;
2169 }
2170
2171 do {
2172 start = u64_stats_fetch_begin(&priv->tx[idx].statss);
2173 tx_bytes = priv->tx[idx].bytes_done;
2174 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
2175 stats[stats_idx++] = (struct stats) {
2176 .stat_name = cpu_to_be32(TX_WAKE_CNT),
2177 .value = cpu_to_be64(priv->tx[idx].wake_queue),
2178 .queue_id = cpu_to_be32(idx),
2179 };
2180 stats[stats_idx++] = (struct stats) {
2181 .stat_name = cpu_to_be32(TX_STOP_CNT),
2182 .value = cpu_to_be64(priv->tx[idx].stop_queue),
2183 .queue_id = cpu_to_be32(idx),
2184 };
2185 stats[stats_idx++] = (struct stats) {
2186 .stat_name = cpu_to_be32(TX_FRAMES_SENT),
2187 .value = cpu_to_be64(tx_frames),
2188 .queue_id = cpu_to_be32(idx),
2189 };
2190 stats[stats_idx++] = (struct stats) {
2191 .stat_name = cpu_to_be32(TX_BYTES_SENT),
2192 .value = cpu_to_be64(tx_bytes),
2193 .queue_id = cpu_to_be32(idx),
2194 };
2195 stats[stats_idx++] = (struct stats) {
2196 .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
2197 .value = cpu_to_be64(last_completion),
2198 .queue_id = cpu_to_be32(idx),
2199 };
2200 stats[stats_idx++] = (struct stats) {
2201 .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
2202 .value = cpu_to_be64(priv->tx[idx].queue_timeout),
2203 .queue_id = cpu_to_be32(idx),
2204 };
2205 }
2206 }
2207 /* rx stats */
2208 if (priv->rx) {
2209 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
2210 stats[stats_idx++] = (struct stats) {
2211 .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
2212 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
2213 .queue_id = cpu_to_be32(idx),
2214 };
2215 stats[stats_idx++] = (struct stats) {
2216 .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
2217 .value = cpu_to_be64(priv->rx[idx].fill_cnt),
2218 .queue_id = cpu_to_be32(idx),
2219 };
2220 }
2221 }
2222 }
2223
2224 /* Handle NIC status register changes, reset requests and report stats */
2225 static void gve_service_task(struct work_struct *work)
2226 {
2227 struct gve_priv *priv = container_of(work, struct gve_priv,
2228 service_task);
2229 u32 status = ioread32be(&priv->reg_bar0->device_status);
2230
2231 gve_handle_status(priv, status);
2232
2233 gve_handle_reset(priv);
2234 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
2235 }
2236
2237 static void gve_set_netdev_xdp_features(struct gve_priv *priv)
2238 {
2239 xdp_features_t xdp_features;
2240
2241 if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
2242 xdp_features = NETDEV_XDP_ACT_BASIC;
2243 xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2244 xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
2245 } else {
2246 xdp_features = 0;
2247 }
2248
2249 xdp_set_features_flag(priv->dev, xdp_features);
2250 }
2251
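/* Initial (or post-reset) bring-up: allocate the admin queue, verify driver
 * compatibility and, unless skip_describe_device is set, query the device
 * description, size the MSI-X notify blocks and default queue counts, then
 * set up the device resources.
 */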
2252 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
2253 {
2254 int num_ntfy;
2255 int err;
2256
2257 /* Set up the adminq */
2258 err = gve_adminq_alloc(&priv->pdev->dev, priv);
2259 if (err) {
2260 dev_err(&priv->pdev->dev,
2261 "Failed to alloc admin queue: err=%d\n", err);
2262 return err;
2263 }
2264
2265 err = gve_verify_driver_compatibility(priv);
2266 if (err) {
2267 dev_err(&priv->pdev->dev,
2268 "Could not verify driver compatibility: err=%d\n", err);
2269 goto err;
2270 }
2271
2272 priv->num_registered_pages = 0;
2273
2274 if (skip_describe_device)
2275 goto setup_device;
2276
2277 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
2278 /* Get the initial information we need from the device */
2279 err = gve_adminq_describe_device(priv);
2280 if (err) {
2281 dev_err(&priv->pdev->dev,
2282 "Could not get device information: err=%d\n", err);
2283 goto err;
2284 }
2285 priv->dev->mtu = priv->dev->max_mtu;
2286 num_ntfy = pci_msix_vec_count(priv->pdev);
2287 if (num_ntfy <= 0) {
2288 dev_err(&priv->pdev->dev,
2289 "could not count MSI-x vectors: err=%d\n", num_ntfy);
2290 err = num_ntfy;
2291 goto err;
2292 } else if (num_ntfy < GVE_MIN_MSIX) {
2293 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
2294 GVE_MIN_MSIX, num_ntfy);
2295 err = -EINVAL;
2296 goto err;
2297 }
2298
2299 /* Big TCP is only supported on DQO */
2300 if (!gve_is_gqi(priv))
2301 netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
2302
2303 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
2304 /* gvnic has one Notification Block per MSI-x vector, except for the
2305 * management vector
2306 */
2307 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
2308 priv->mgmt_msix_idx = priv->num_ntfy_blks;
2309
2310 priv->tx_cfg.max_queues =
2311 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
2312 priv->rx_cfg.max_queues =
2313 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
2314
2315 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
2316 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
2317 if (priv->default_num_queues > 0) {
2318 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
2319 priv->tx_cfg.num_queues);
2320 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
2321 priv->rx_cfg.num_queues);
2322 }
2323
2324 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
2325 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
2326 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
2327 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
2328
2329 if (!gve_is_gqi(priv)) {
2330 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
2331 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
2332 }
2333
2334 setup_device:
2335 gve_set_netdev_xdp_features(priv);
2336 err = gve_setup_device_resources(priv);
2337 if (!err)
2338 return 0;
2339 err:
2340 gve_adminq_free(&priv->pdev->dev, priv);
2341 return err;
2342 }
2343
2344 static void gve_teardown_priv_resources(struct gve_priv *priv)
2345 {
2346 gve_teardown_device_resources(priv);
2347 gve_adminq_free(&priv->pdev->dev, priv);
2348 }
2349
2350 static void gve_trigger_reset(struct gve_priv *priv)
2351 {
2352 /* Reset the device by releasing the AQ */
2353 gve_adminq_release(priv);
2354 }
2355
2356 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
2357 {
2358 gve_trigger_reset(priv);
2359 /* With the reset having already happened, close cannot fail */
2360 if (was_up)
2361 gve_close(priv->dev);
2362 gve_teardown_priv_resources(priv);
2363 }
2364
2365 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
2366 {
2367 int err;
2368
2369 err = gve_init_priv(priv, true);
2370 if (err)
2371 goto err;
2372 if (was_up) {
2373 err = gve_open(priv->dev);
2374 if (err)
2375 goto err;
2376 }
2377 return 0;
2378 err:
2379 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
2380 gve_turndown(priv);
2381 return err;
2382 }
2383
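/* Full device reset. With attempt_teardown the driver first tries a clean
 * close before freeing resources; otherwise it turns the device down and
 * resets by releasing the admin queue. Recovery re-runs gve_init_priv()
 * and reopens the interface if it was up beforehand.
 */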
2384 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
2385 {
2386 bool was_up = netif_running(priv->dev);
2387 int err;
2388
2389 dev_info(&priv->pdev->dev, "Performing reset\n");
2390 gve_clear_do_reset(priv);
2391 gve_set_reset_in_progress(priv);
2392 /* If we aren't attempting to teardown normally, just go turndown and
2393 * reset right away.
2394 */
2395 if (!attempt_teardown) {
2396 gve_turndown(priv);
2397 gve_reset_and_teardown(priv, was_up);
2398 } else {
2399 /* Otherwise attempt to close normally */
2400 if (was_up) {
2401 err = gve_close(priv->dev);
2402 /* If that fails reset as we did above */
2403 if (err)
2404 gve_reset_and_teardown(priv, was_up);
2405 }
2406 /* Clean up any remaining resources */
2407 gve_teardown_priv_resources(priv);
2408 }
2409
2410 /* Set it all back up */
2411 err = gve_reset_recovery(priv, was_up);
2412 gve_clear_reset_in_progress(priv);
2413 priv->reset_cnt++;
2414 priv->interface_up_cnt = 0;
2415 priv->interface_down_cnt = 0;
2416 priv->stats_report_trigger_cnt = 0;
2417 return err;
2418 }
2419
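/* Report the driver version to the device by streaming the version prefix
 * and version string byte by byte into the driver-version register,
 * terminated with a newline.
 */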
2420 static void gve_write_version(u8 __iomem *driver_version_register)
2421 {
2422 const char *c = gve_version_prefix;
2423
2424 while (*c) {
2425 writeb(*c, driver_version_register);
2426 c++;
2427 }
2428
2429 c = gve_version_str;
2430 while (*c) {
2431 writeb(*c, driver_version_register);
2432 c++;
2433 }
2434 writeb('\n', driver_version_register);
2435 }
2436
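/* ndo_queue_stop hook for a single RX queue. The whole device is quiesced
 * around the per-queue teardown because single-queue destruction requires
 * it; the ring state is handed back via per_q_mem so the queue can later
 * be restarted or its memory freed.
 */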
2437 static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
2438 {
2439 struct gve_priv *priv = netdev_priv(dev);
2440 struct gve_rx_ring *gve_per_q_mem;
2441 int err;
2442
2443 if (!priv->rx)
2444 return -EAGAIN;
2445
2446 /* Destroying queue 0 while other queues exist is not supported in DQO */
2447 if (!gve_is_gqi(priv) && idx == 0)
2448 return -ERANGE;
2449
2450 /* Single-queue destruction requires quiescence on all queues */
2451 gve_turndown(priv);
2452
2453 /* This failure will trigger a reset - no need to clean up */
2454 err = gve_adminq_destroy_single_rx_queue(priv, idx);
2455 if (err)
2456 return err;
2457
2458 if (gve_is_qpl(priv)) {
2459 /* This failure will trigger a reset - no need to clean up */
2460 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
2461 if (err)
2462 return err;
2463 }
2464
2465 gve_rx_stop_ring(priv, idx);
2466
2467 /* Turn the unstopped queues back up */
2468 gve_turnup_and_check_status(priv);
2469
2470 gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2471 *gve_per_q_mem = priv->rx[idx];
2472 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2473 return 0;
2474 }
2475
2476 static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
2477 {
2478 struct gve_priv *priv = netdev_priv(dev);
2479 struct gve_rx_alloc_rings_cfg cfg = {0};
2480 struct gve_rx_ring *gve_per_q_mem;
2481
2482 gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2483 gve_rx_get_curr_alloc_cfg(priv, &cfg);
2484
2485 if (gve_is_gqi(priv))
2486 gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
2487 else
2488 gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
2489 }
2490
2491 static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
2492 int idx)
2493 {
2494 struct gve_priv *priv = netdev_priv(dev);
2495 struct gve_rx_alloc_rings_cfg cfg = {0};
2496 struct gve_rx_ring *gve_per_q_mem;
2497 int err;
2498
2499 if (!priv->rx)
2500 return -EAGAIN;
2501
2502 gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2503 gve_rx_get_curr_alloc_cfg(priv, &cfg);
2504
2505 if (gve_is_gqi(priv))
2506 err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
2507 else
2508 err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
2509
2510 return err;
2511 }
2512
2513 static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
2514 {
2515 struct gve_priv *priv = netdev_priv(dev);
2516 struct gve_rx_ring *gve_per_q_mem;
2517 int err;
2518
2519 if (!priv->rx)
2520 return -EAGAIN;
2521
2522 gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2523 priv->rx[idx] = *gve_per_q_mem;
2524
2525 /* Single-queue creation requires quiescence on all queues */
2526 gve_turndown(priv);
2527
2528 gve_rx_start_ring(priv, idx);
2529
2530 if (gve_is_qpl(priv)) {
2531 /* This failure will trigger a reset - no need to clean up */
2532 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
2533 if (err)
2534 goto abort;
2535 }
2536
2537 /* This failure will trigger a reset - no need to clean up */
2538 err = gve_adminq_create_single_rx_queue(priv, idx);
2539 if (err)
2540 goto abort;
2541
2542 if (gve_is_gqi(priv))
2543 gve_rx_write_doorbell(priv, &priv->rx[idx]);
2544 else
2545 gve_rx_post_buffers_dqo(&priv->rx[idx]);
2546
2547 /* Turn the unstopped queues back up */
2548 gve_turnup_and_check_status(priv);
2549 return 0;
2550
2551 abort:
2552 gve_rx_stop_ring(priv, idx);
2553
2554 /* All failures in this func result in a reset, by clearing the struct
2555 * at idx, we prevent a double free when that reset runs. The reset,
2556 * which needs the rtnl lock, will not run till this func returns and
2557 * its caller gives up the lock.
2558 */
2559 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2560 return err;
2561 }
2562
2563 static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
2564 .ndo_queue_mem_size = sizeof(struct gve_rx_ring),
2565 .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc,
2566 .ndo_queue_mem_free = gve_rx_queue_mem_free,
2567 .ndo_queue_start = gve_rx_queue_start,
2568 .ndo_queue_stop = gve_rx_queue_stop,
2569 };
2570
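/* PCI probe: map the register and doorbell BARs, size the netdev from the
 * device-advertised maximum queue counts, set default features, allocate
 * the service workqueue and initialize private state, then register the
 * netdev and kick off the service task.
 */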
2571 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2572 {
2573 int max_tx_queues, max_rx_queues;
2574 struct net_device *dev;
2575 __be32 __iomem *db_bar;
2576 struct gve_registers __iomem *reg_bar;
2577 struct gve_priv *priv;
2578 int err;
2579
2580 err = pci_enable_device(pdev);
2581 if (err)
2582 return err;
2583
2584 err = pci_request_regions(pdev, gve_driver_name);
2585 if (err)
2586 goto abort_with_enabled;
2587
2588 pci_set_master(pdev);
2589
2590 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2591 if (err) {
2592 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
2593 goto abort_with_pci_region;
2594 }
2595
2596 reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
2597 if (!reg_bar) {
2598 dev_err(&pdev->dev, "Failed to map pci bar!\n");
2599 err = -ENOMEM;
2600 goto abort_with_pci_region;
2601 }
2602
2603 db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
2604 if (!db_bar) {
2605 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
2606 err = -ENOMEM;
2607 goto abort_with_reg_bar;
2608 }
2609
2610 gve_write_version(&reg_bar->driver_version);
2611 /* Get max queues to alloc etherdev */
2612 max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
2613 max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
2614 /* Alloc and setup the netdev and priv */
2615 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2616 if (!dev) {
2617 dev_err(&pdev->dev, "could not allocate netdev\n");
2618 err = -ENOMEM;
2619 goto abort_with_db_bar;
2620 }
2621 SET_NETDEV_DEV(dev, &pdev->dev);
2622 pci_set_drvdata(pdev, dev);
2623 dev->ethtool_ops = &gve_ethtool_ops;
2624 dev->netdev_ops = &gve_netdev_ops;
2625 dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
2626
2627 /* Set default and supported features.
2628 *
2629 * Features might be set in other locations as well (such as
2630 * `gve_adminq_describe_device`).
2631 */
2632 dev->hw_features = NETIF_F_HIGHDMA;
2633 dev->hw_features |= NETIF_F_SG;
2634 dev->hw_features |= NETIF_F_HW_CSUM;
2635 dev->hw_features |= NETIF_F_TSO;
2636 dev->hw_features |= NETIF_F_TSO6;
2637 dev->hw_features |= NETIF_F_TSO_ECN;
2638 dev->hw_features |= NETIF_F_RXCSUM;
2639 dev->hw_features |= NETIF_F_RXHASH;
2640 dev->features = dev->hw_features;
2641 dev->watchdog_timeo = 5 * HZ;
2642 dev->min_mtu = ETH_MIN_MTU;
2643 netif_carrier_off(dev);
2644
2645 priv = netdev_priv(dev);
2646 priv->dev = dev;
2647 priv->pdev = pdev;
2648 priv->msg_enable = DEFAULT_MSG_LEVEL;
2649 priv->reg_bar0 = reg_bar;
2650 priv->db_bar2 = db_bar;
2651 priv->service_task_flags = 0x0;
2652 priv->state_flags = 0x0;
2653 priv->ethtool_flags = 0x0;
2654 priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
2655 priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
2656
2657 gve_set_probe_in_progress(priv);
2658 priv->gve_wq = alloc_ordered_workqueue("gve", 0);
2659 if (!priv->gve_wq) {
2660 dev_err(&pdev->dev, "Could not allocate workqueue");
2661 err = -ENOMEM;
2662 goto abort_with_netdev;
2663 }
2664 INIT_WORK(&priv->service_task, gve_service_task);
2665 INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2666 priv->tx_cfg.max_queues = max_tx_queues;
2667 priv->rx_cfg.max_queues = max_rx_queues;
2668
2669 err = gve_init_priv(priv, false);
2670 if (err)
2671 goto abort_with_wq;
2672
2673 err = register_netdev(dev);
2674 if (err)
2675 goto abort_with_gve_init;
2676
2677 dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
2678 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
2679 gve_clear_probe_in_progress(priv);
2680 queue_work(priv->gve_wq, &priv->service_task);
2681 return 0;
2682
2683 abort_with_gve_init:
2684 gve_teardown_priv_resources(priv);
2685
2686 abort_with_wq:
2687 destroy_workqueue(priv->gve_wq);
2688
2689 abort_with_netdev:
2690 free_netdev(dev);
2691
2692 abort_with_db_bar:
2693 pci_iounmap(pdev, db_bar);
2694
2695 abort_with_reg_bar:
2696 pci_iounmap(pdev, reg_bar);
2697
2698 abort_with_pci_region:
2699 pci_release_regions(pdev);
2700
2701 abort_with_enabled:
2702 pci_disable_device(pdev);
2703 return err;
2704 }
2705
2706 static void gve_remove(struct pci_dev *pdev)
2707 {
2708 struct net_device *netdev = pci_get_drvdata(pdev);
2709 struct gve_priv *priv = netdev_priv(netdev);
2710 __be32 __iomem *db_bar = priv->db_bar2;
2711 void __iomem *reg_bar = priv->reg_bar0;
2712
2713 unregister_netdev(netdev);
2714 gve_teardown_priv_resources(priv);
2715 destroy_workqueue(priv->gve_wq);
2716 free_netdev(netdev);
2717 pci_iounmap(pdev, db_bar);
2718 pci_iounmap(pdev, reg_bar);
2719 pci_release_regions(pdev);
2720 pci_disable_device(pdev);
2721 }
2722
2723 static void gve_shutdown(struct pci_dev *pdev)
2724 {
2725 struct net_device *netdev = pci_get_drvdata(pdev);
2726 struct gve_priv *priv = netdev_priv(netdev);
2727 bool was_up = netif_running(priv->dev);
2728
2729 netif_device_detach(netdev);
2730
2731 rtnl_lock();
2732 if (was_up && gve_close(priv->dev)) {
2733 /* If the dev was up, attempt to close, if close fails, reset */
2734 gve_reset_and_teardown(priv, was_up);
2735 } else {
2736 /* If the dev wasn't up or close worked, finish tearing down */
2737 gve_teardown_priv_resources(priv);
2738 }
2739 rtnl_unlock();
2740 }
2741
2742 #ifdef CONFIG_PM
2743 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
2744 {
2745 struct net_device *netdev = pci_get_drvdata(pdev);
2746 struct gve_priv *priv = netdev_priv(netdev);
2747 bool was_up = netif_running(priv->dev);
2748
2749 priv->suspend_cnt++;
2750 rtnl_lock();
2751 if (was_up && gve_close(priv->dev)) {
2752 /* If the dev was up, attempt to close, if close fails, reset */
2753 gve_reset_and_teardown(priv, was_up);
2754 } else {
2755 /* If the dev wasn't up or close worked, finish tearing down */
2756 gve_teardown_priv_resources(priv);
2757 }
2758 priv->up_before_suspend = was_up;
2759 rtnl_unlock();
2760 return 0;
2761 }
2762
2763 static int gve_resume(struct pci_dev *pdev)
2764 {
2765 struct net_device *netdev = pci_get_drvdata(pdev);
2766 struct gve_priv *priv = netdev_priv(netdev);
2767 int err;
2768
2769 priv->resume_cnt++;
2770 rtnl_lock();
2771 err = gve_reset_recovery(priv, priv->up_before_suspend);
2772 rtnl_unlock();
2773 return err;
2774 }
2775 #endif /* CONFIG_PM */
2776
2777 static const struct pci_device_id gve_id_table[] = {
2778 { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
2779 { }
2780 };
2781
2782 static struct pci_driver gve_driver = {
2783 .name = gve_driver_name,
2784 .id_table = gve_id_table,
2785 .probe = gve_probe,
2786 .remove = gve_remove,
2787 .shutdown = gve_shutdown,
2788 #ifdef CONFIG_PM
2789 .suspend = gve_suspend,
2790 .resume = gve_resume,
2791 #endif
2792 };
2793
2794 module_pci_driver(gve_driver);
2795
2796 MODULE_DEVICE_TABLE(pci, gve_id_table);
2797 MODULE_AUTHOR("Google, Inc.");
2798 MODULE_DESCRIPTION("Google Virtual NIC Driver");
2799 MODULE_LICENSE("Dual MIT/GPL");
2800 MODULE_VERSION(GVE_VERSION);
2801