// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

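/* Illustrative sizing only (assumed figures): with an IOMMU present,
 * EFX_RECYCLE_RING_SIZE_IOMMU (4096) buffers and, say, two buffers per
 * 4 KiB page would give 4096 / 2 = 2048 pages, already a power of two.
 * efx_init_rx_recycle_ring() below performs exactly this division and then
 * rounds the result up with roundup_pow_of_two().
 */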
/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

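/* Allocate the page recycle ring.  The ring holds one slot per page (not per
 * buffer); allocation failure is not fatal here, it simply leaves
 * page_ptr_mask at zero and page_ring NULL, so recycling is effectively
 * disabled and every refill falls back to alloc_pages().
 */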
static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}
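	/* Worked example (illustrative figures only): if max_fill came out at
	 * 507 descriptors and one refill batch is 8 buffers, max_trigger is
	 * 499; rx_refill_threshold=50 would then give trigger = 507 * 50 /
	 * 100 = 253, while the default threshold of 0 refills as soon as the
	 * fill level drops below 499.
	 */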

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

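/* Drop the page references held by a run of num_bufs RX buffers.  This only
 * releases references; it does not unmap the page (see efx_unmap_rx_buffer()
 * for that).
 */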
void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

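/* Arm the slow-fill timer.  When buffer allocation fails in the fast path,
 * this retries roughly 10 ms later via efx_rx_slow_fill() above, which kicks
 * NAPI with a fill event rather than refilling directly.
 */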
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

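/* Work out how each page is split into RX buffers.  Illustrative example
 * (assumed figures): with a 4 KiB page and a buffer step a little under
 * 2 KiB, two buffers fit per order-0 page after reserving space for the
 * leading struct efx_rx_page_state; when rx_buffer_order is non-zero there
 * is always exactly one buffer per (higher-order) page.
 */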
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap.  If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

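/* Look up an RSS context by its user-visible ID.  The caller must hold
 * efx->rss_lock, as the WARN_ON below asserts.
 */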
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

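/* Compare and hash filter specifications.  Both helpers treat everything from
 * the vport_id member to the end of struct efx_filter_spec as the match key,
 * so specs that compare equal also hash equally; match_flags and the RX/TX
 * direction flags are checked separately in efx_filter_spec_equal().
 */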
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->vport_id, &right->vport_id,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, vport_id)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
	return jhash2((const u32 *)&spec->vport_id,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, vport_id)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry.  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

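/* Probe the hardware filter table and, when accelerated RFS is enabled,
 * allocate each channel's rps_flow_id[] array.  Note the locking order used
 * here: mac_lock is taken before the filter_sem write lock.
 */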
int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx) {
				kfree(channel->rps_flow_id);
				channel->rps_flow_id = NULL;
			}
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL

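/* Deferred work behind efx_filter_rfs(): perform the actual hardware filter
 * insertion, record the flow ID so the filter can be expired later, then
 * release the request slot and the netdev reference taken when the work was
 * queued.
 */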
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

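/* Accelerated RFS entry point (the driver's ndo_rx_flow_steer hook).  This
 * runs in atomic context, so it only dissects the flow, claims one of the
 * EFX_RPS_MAX_IN_FLIGHT request slots, builds the filter spec and queues
 * efx_filter_rfs_work() to do the insertion.
 */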
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

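/* Scan up to @quota installed RFS filters on this channel, asking the NIC
 * type's filter_rfs_expire_one() whether each flow should be expired;
 * expired entries are returned to RPS_FLOW_ID_INVALID.  Returns false (and
 * does no work) if another expiry pass already holds rps_mutex.
 */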
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */