// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
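
/* A hedged usage sketch (assuming efx.c registers this variable as the
 * "interrupt_mode" module parameter, as in mainline sfc):
 *
 *   modprobe sfc interrupt_mode=1    # prefer MSI over MSI-X
 */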

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int rss_cpus;
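
/* A hedged sketch (assuming efx.c registers this as the "rss_cpus" module
 * parameter, as in mainline sfc): limit RSS to four CPUs with
 *
 *   modprobe sfc rss_cpus=4
 */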

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
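
/* Because both parameters use 0644 permissions, they can also be tuned at
 * runtime through sysfs, e.g. (module name assumed to be sfc):
 *
 *   echo 4000 > /sys/module/sfc/parameters/irq_adapt_low_thresh
 */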

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/***************
 * Housekeeping
 ***************/

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};

/*************
 * INTERRUPTS
 *************/

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
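
/* Worked example (an illustration, not guaranteed for every topology): with
 * rss_cpus=0 on an 8-core/16-thread machine, the loop above counts each
 * physical core once, because topology_sibling_cpumask() marks both SMT
 * siblings as visited; the result is 8, i.e. one RX queue per core rather
 * than one per hardware thread.
 */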

static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */

	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels + n_xdp_ev > max_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			  n_xdp_ev, n_channels, max_channels);
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			  n_xdp_tx, n_channels, efx->max_vis);
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_tx, n_xdp_ev);
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}
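
/* Worked example (figures assumed for illustration): with parallelism = 8,
 * efx_separate_tx_channels = false, no extra channels, 16 possible CPUs and
 * EFX_MAX_TXQ_PER_CHANNEL = 4, this asks for 8 normal channels plus
 * n_xdp_tx = 16 XDP TX queues packed into n_xdp_ev = DIV_ROUND_UP(16, 4) = 4
 * XDP channels, i.e. 12 channels (and MSI-X vectors) in total, with
 * xdp_channel_offset = 8.
 */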

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}
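
/* The fallback ladder above, in summary: MSI-X is tried first; if
 * pci_enable_msix_range() fails and the NIC type permits it, the probe drops
 * to single-channel MSI, and from there to a legacy line interrupt.  Types
 * whose min_interrupt_mode forbids the next step return the error instead.
 */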

#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */

void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/***************
 * EVENT QUEUES
 ***************/

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
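
/* Sizing example (ring sizes assumed for illustration): with 1024 RX and
 * 1024 TX descriptors, entries = roundup_pow_of_two(1024 + 1024 + 128)
 * = 4096, so eventq_mask = 4095 and event-queue indices wrap with a simple
 * bitwise AND.
 */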

/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif
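
/* Quota arithmetic, by way of example: the worker aims to scan the whole
 * filter table roughly once every 30 seconds, so with rfs_filter_count = 100
 * and 15 seconds since the last expiry pass, quota = 100 * 15s / 30s = 50
 * filters.  The "quota >= 20" guard (equivalently, filter_count * time >=
 * 600 * HZ, the test used in efx_poll() below) batches the work so each
 * pass is worthwhile.
 */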

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
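
/* Resulting IRQ names, assuming an interface called eth0: combined channels
 * are named "eth0-0", "eth0-1", ...; with separate TX channels the RX ones
 * become "eth0-rx-0" and the TX ones "eth0-tx-0"; XDP channels appear as
 * "eth0-xdp-0" and so on.
 */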

void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

int efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number;
	int rc;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	xdp_queue_number = 0;
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		if (channel->channel >= efx->tx_channel_offset) {
			if (efx_channel_is_xdp_tx(channel)) {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;

					/* We may have a few left-over XDP TX
					 * queues because xdp_tx_queue_count is
					 * not an exact multiple of
					 * EFX_MAX_TXQ_PER_CHANNEL. We still
					 * allocate and probe those TXQs, but
					 * never use them.
					 */
					if (xdp_queue_number < efx->xdp_tx_queue_count) {
						netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
							  channel->channel, tx_queue->label,
							  xdp_queue_number, tx_queue->queue);
						efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
						xdp_queue_number++;
					}
				}
			} else {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;
					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
						  channel->channel, tx_queue->label,
						  tx_queue->queue);
				}
			}
		}
	}
	WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
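
/* Numbering example (figures assumed for illustration): with
 * xdp_tx_queue_count = 6 and EFX_MAX_TXQ_PER_CHANNEL = 4, the two XDP
 * channels carry 8 TXQs; next_queue hands out HW queue numbers in channel
 * order, xdp_tx_queues[] records only the first 6 XDP TXQs, and the two
 * left-over TXQs are probed but never used.
 */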

bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

/*************
 * START/STOP
 *************/

int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
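
/* Adaptive moderation in practice: efx_poll() calls this once every 1000
 * interrupts.  A score below irq_adapt_low_thresh (default 8000) means the
 * channel is lightly loaded, so the moderation delay is reduced by one step
 * for lower latency; a score above irq_adapt_high_thresh (default 16000)
 * raises it, up to the configured irq_rx_moderation_us ceiling, to cut the
 * interrupt rate under load.
 */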

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}