1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2010 Solarflare Communications Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published
7  * by the Free Software Foundation, incorporated herein by reference.
8  */
9 
10 #include <linux/in.h>
11 #include <net/ip.h>
12 #include "efx.h"
13 #include "filter.h"
14 #include "io.h"
15 #include "nic.h"
16 #include "regs.h"
17 
18 /* "Fudge factors" - difference between programmed value and actual depth.
19  * Due to pipelined implementation we need to program H/W with a value that
20  * is larger than the hop limit we want.
21  */
22 #define FILTER_CTL_SRCH_FUDGE_WILD 3
23 #define FILTER_CTL_SRCH_FUDGE_FULL 1
24 
25 /* Hard maximum hop limit.  Hardware will time-out beyond 200-something.
26  * We also need to avoid infinite loops in the filter search loop in
27  * efx_filter_insert_filter() when the table is full.
28  */
29 #define FILTER_CTL_SRCH_MAX 200
30 
31 /* Don't try very hard to find space for performance hints, as this is
32  * counter-productive. */
33 #define FILTER_CTL_SRCH_HINT_MAX 5
34 
35 enum efx_filter_table_id {
36 	EFX_FILTER_TABLE_RX_IP = 0,
37 	EFX_FILTER_TABLE_RX_MAC,
38 	EFX_FILTER_TABLE_RX_DEF,
39 	EFX_FILTER_TABLE_TX_MAC,
40 	EFX_FILTER_TABLE_COUNT,
41 };
42 
43 enum efx_filter_index {
44 	EFX_FILTER_INDEX_UC_DEF,
45 	EFX_FILTER_INDEX_MC_DEF,
46 	EFX_FILTER_SIZE_RX_DEF,
47 };
48 
49 struct efx_filter_table {
50 	enum efx_filter_table_id id;
51 	u32		offset;		/* address of table relative to BAR */
52 	unsigned	size;		/* number of entries */
53 	unsigned	step;		/* step between entries */
54 	unsigned	used;		/* number currently used */
55 	unsigned long	*used_bitmap;
56 	struct efx_filter_spec *spec;
57 	unsigned	search_depth[EFX_FILTER_TYPE_COUNT];
58 };
59 
60 struct efx_filter_state {
61 	spinlock_t	lock;
62 	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
63 #ifdef CONFIG_RFS_ACCEL
64 	u32		*rps_flow_id;
65 	unsigned	rps_expire_index;
66 #endif
67 };
68 
69 static void efx_filter_table_clear_entry(struct efx_nic *efx,
70 					 struct efx_filter_table *table,
71 					 unsigned int filter_idx);
72 
73 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
74  * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
75 static u16 efx_filter_hash(u32 key)
76 {
77 	u16 tmp;
78 
79 	/* First 16 rounds */
80 	tmp = 0x1fff ^ key >> 16;
81 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
82 	tmp = tmp ^ tmp >> 9;
83 	/* Last 16 rounds */
84 	tmp = tmp ^ tmp << 13 ^ key;
85 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
86 	return tmp ^ tmp >> 9;
87 }
88 
89 /* To allow for hash collisions, filter search continues at these
90  * increments from the first possible entry selected by the hash. */
91 static u16 efx_filter_increment(u32 key)
92 {
93 	return key * 2 - 1;
94 }
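
/* Editorial sketch: taken together, the two helpers above define the
 * probe sequence used by the insertion/search code further down.  The
 * hash-probed tables have power-of-two sizes and the increment is
 * always odd, so the walk eventually visits every slot.  Roughly
 * (max_depth here stands for the search limits defined above):
 *
 *	u32 key = efx_filter_build(&filter, spec);
 *	unsigned int i = efx_filter_hash(key) & (table->size - 1);
 *	unsigned int incr = efx_filter_increment(key);
 *	unsigned int depth;
 *
 *	for (depth = 1; depth <= max_depth; depth++) {
 *		// examine table entry i here
 *		i = (i + incr) & (table->size - 1);
 *	}
 */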
95 
96 static enum efx_filter_table_id
97 efx_filter_spec_table_id(const struct efx_filter_spec *spec)
98 {
99 	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
100 	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
101 	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
102 	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
103 	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
104 	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
105 	BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
106 	EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
107 	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
108 }
109 
110 static struct efx_filter_table *
111 efx_filter_spec_table(struct efx_filter_state *state,
112 		      const struct efx_filter_spec *spec)
113 {
114 	if (spec->type == EFX_FILTER_UNSPEC)
115 		return NULL;
116 	else
117 		return &state->table[efx_filter_spec_table_id(spec)];
118 }
119 
120 static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
121 {
122 	memset(table->search_depth, 0, sizeof(table->search_depth));
123 }
124 
125 static void efx_filter_push_rx_config(struct efx_nic *efx)
126 {
127 	struct efx_filter_state *state = efx->filter_state;
128 	struct efx_filter_table *table;
129 	efx_oword_t filter_ctl;
130 
131 	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
132 
133 	table = &state->table[EFX_FILTER_TABLE_RX_IP];
134 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
135 			    table->search_depth[EFX_FILTER_TCP_FULL] +
136 			    FILTER_CTL_SRCH_FUDGE_FULL);
137 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
138 			    table->search_depth[EFX_FILTER_TCP_WILD] +
139 			    FILTER_CTL_SRCH_FUDGE_WILD);
140 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
141 			    table->search_depth[EFX_FILTER_UDP_FULL] +
142 			    FILTER_CTL_SRCH_FUDGE_FULL);
143 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
144 			    table->search_depth[EFX_FILTER_UDP_WILD] +
145 			    FILTER_CTL_SRCH_FUDGE_WILD);
146 
147 	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
148 	if (table->size) {
149 		EFX_SET_OWORD_FIELD(
150 			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
151 			table->search_depth[EFX_FILTER_MAC_FULL] +
152 			FILTER_CTL_SRCH_FUDGE_FULL);
153 		EFX_SET_OWORD_FIELD(
154 			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
155 			table->search_depth[EFX_FILTER_MAC_WILD] +
156 			FILTER_CTL_SRCH_FUDGE_WILD);
157 	}
158 
159 	table = &state->table[EFX_FILTER_TABLE_RX_DEF];
160 	if (table->size) {
161 		EFX_SET_OWORD_FIELD(
162 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
163 			table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
164 		EFX_SET_OWORD_FIELD(
165 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
166 			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 			   EFX_FILTER_FLAG_RX_RSS));
168 		EFX_SET_OWORD_FIELD(
169 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 			table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 		EFX_SET_OWORD_FIELD(
172 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 			!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 			   EFX_FILTER_FLAG_RX_RSS));
175 
176 		/* There is a single bit to enable RX scatter for all
177 		 * unmatched packets.  Only set it if scatter is
178 		 * enabled in both filter specs.
179 		 */
180 		EFX_SET_OWORD_FIELD(
181 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
182 			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
183 			   table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
184 			   EFX_FILTER_FLAG_RX_SCATTER));
185 	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
186 		/* We don't expose 'default' filters because unmatched
187 		 * packets always go to the queue number found in the
188 		 * RSS table.  But we still need to set the RX scatter
189 		 * bit here.
190 		 */
191 		EFX_SET_OWORD_FIELD(
192 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
193 			efx->rx_scatter);
194 	}
195 
196 	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
197 }
198 
199 static void efx_filter_push_tx_limits(struct efx_nic *efx)
200 {
201 	struct efx_filter_state *state = efx->filter_state;
202 	struct efx_filter_table *table;
203 	efx_oword_t tx_cfg;
204 
205 	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
206 
207 	table = &state->table[EFX_FILTER_TABLE_TX_MAC];
208 	if (table->size) {
209 		EFX_SET_OWORD_FIELD(
210 			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
211 			table->search_depth[EFX_FILTER_MAC_FULL] +
212 			FILTER_CTL_SRCH_FUDGE_FULL);
213 		EFX_SET_OWORD_FIELD(
214 			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
215 			table->search_depth[EFX_FILTER_MAC_WILD] +
216 			FILTER_CTL_SRCH_FUDGE_WILD);
217 	}
218 
219 	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
220 }
221 
222 static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
223 					 __be32 host1, __be16 port1,
224 					 __be32 host2, __be16 port2)
225 {
226 	spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
227 	spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
228 	spec->data[2] = ntohl(host2);
229 }
230 
231 static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
232 					 __be32 *host1, __be16 *port1,
233 					 __be32 *host2, __be16 *port2)
234 {
235 	*host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
236 	*port1 = htons(spec->data[0]);
237 	*host2 = htonl(spec->data[2]);
238 	*port2 = htons(spec->data[1] >> 16);
239 }
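
/* A worked example of the packing above (editorial; the addresses and
 * ports are arbitrary): host1 = 192.168.0.1, port1 = 8080,
 * host2 = 10.0.0.2, port2 = 80 gives, in host byte order,
 *
 *	data[0] = 0x00011f90	(low 16 bits of host1, then port1)
 *	data[1] = 0x0050c0a8	(port2, then high 16 bits of host1)
 *	data[2] = 0x0a000002	(host2)
 *
 * i.e. the 96-bit n-tuple is laid out as port1, host1, port2, host2
 * starting from the least significant end, and __efx_filter_get_ipv4()
 * simply reverses this.
 */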
240 
241 /**
242  * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
243  * @spec: Specification to initialise
244  * @proto: Transport layer protocol number
245  * @host: Local host address (network byte order)
246  * @port: Local port (network byte order)
247  */
248 int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
249 			      __be32 host, __be16 port)
250 {
251 	__be32 host1;
252 	__be16 port1;
253 
254 	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
255 
256 	/* This cannot currently be combined with other filtering */
257 	if (spec->type != EFX_FILTER_UNSPEC)
258 		return -EPROTONOSUPPORT;
259 
260 	if (port == 0)
261 		return -EINVAL;
262 
263 	switch (proto) {
264 	case IPPROTO_TCP:
265 		spec->type = EFX_FILTER_TCP_WILD;
266 		break;
267 	case IPPROTO_UDP:
268 		spec->type = EFX_FILTER_UDP_WILD;
269 		break;
270 	default:
271 		return -EPROTONOSUPPORT;
272 	}
273 
274 	/* Filter is constructed in terms of source and destination,
275 	 * with the odd wrinkle that the ports are swapped in a UDP
276 	 * wildcard filter.  We need to convert from local and remote
277 	 * (= zero for wildcard) addresses.
278 	 */
279 	host1 = 0;
280 	if (proto != IPPROTO_UDP) {
281 		port1 = 0;
282 	} else {
283 		port1 = port;
284 		port = 0;
285 	}
286 
287 	__efx_filter_set_ipv4(spec, host1, port1, host, port);
288 	return 0;
289 }
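
/* Example usage (an editorial sketch, not part of the driver; local_ip
 * is a hypothetical __be32 holding the local address in network byte
 * order): steering all TCP traffic to local port 80 to RX queue 0.
 * efx_filter_init_rx() is the spec initialiser used elsewhere in this
 * file.
 *
 *	struct efx_filter_spec spec;
 *	int rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
 *	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
 *				       local_ip, htons(80));
 *	if (rc == 0)
 *		rc = efx_filter_insert_filter(efx, &spec, true);
 */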
290 
291 int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
292 			      u8 *proto, __be32 *host, __be16 *port)
293 {
294 	__be32 host1;
295 	__be16 port1;
296 
297 	switch (spec->type) {
298 	case EFX_FILTER_TCP_WILD:
299 		*proto = IPPROTO_TCP;
300 		__efx_filter_get_ipv4(spec, &host1, &port1, host, port);
301 		return 0;
302 	case EFX_FILTER_UDP_WILD:
303 		*proto = IPPROTO_UDP;
304 		__efx_filter_get_ipv4(spec, &host1, port, host, &port1);
305 		return 0;
306 	default:
307 		return -EINVAL;
308 	}
309 }
310 
311 /**
312  * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
313  * @spec: Specification to initialise
314  * @proto: Transport layer protocol number
315  * @host: Local host address (network byte order)
316  * @port: Local port (network byte order)
317  * @rhost: Remote host address (network byte order)
318  * @rport: Remote port (network byte order)
319  */
320 int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
321 			     __be32 host, __be16 port,
322 			     __be32 rhost, __be16 rport)
323 {
324 	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
325 
326 	/* This cannot currently be combined with other filtering */
327 	if (spec->type != EFX_FILTER_UNSPEC)
328 		return -EPROTONOSUPPORT;
329 
330 	if (port == 0 || rport == 0)
331 		return -EINVAL;
332 
333 	switch (proto) {
334 	case IPPROTO_TCP:
335 		spec->type = EFX_FILTER_TCP_FULL;
336 		break;
337 	case IPPROTO_UDP:
338 		spec->type = EFX_FILTER_UDP_FULL;
339 		break;
340 	default:
341 		return -EPROTONOSUPPORT;
342 	}
343 
344 	__efx_filter_set_ipv4(spec, rhost, rport, host, port);
345 	return 0;
346 }
347 
348 int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
349 			     u8 *proto, __be32 *host, __be16 *port,
350 			     __be32 *rhost, __be16 *rport)
351 {
352 	switch (spec->type) {
353 	case EFX_FILTER_TCP_FULL:
354 		*proto = IPPROTO_TCP;
355 		break;
356 	case EFX_FILTER_UDP_FULL:
357 		*proto = IPPROTO_UDP;
358 		break;
359 	default:
360 		return -EINVAL;
361 	}
362 
363 	__efx_filter_get_ipv4(spec, rhost, rport, host, port);
364 	return 0;
365 }
366 
367 /**
368  * efx_filter_set_eth_local - specify local Ethernet address and optional VID
369  * @spec: Specification to initialise
370  * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
371  * @addr: Local Ethernet MAC address
372  */
373 int efx_filter_set_eth_local(struct efx_filter_spec *spec,
374 			     u16 vid, const u8 *addr)
375 {
376 	EFX_BUG_ON_PARANOID(!(spec->flags &
377 			      (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
378 
379 	/* This cannot currently be combined with other filtering */
380 	if (spec->type != EFX_FILTER_UNSPEC)
381 		return -EPROTONOSUPPORT;
382 
383 	if (vid == EFX_FILTER_VID_UNSPEC) {
384 		spec->type = EFX_FILTER_MAC_WILD;
385 		spec->data[0] = 0;
386 	} else {
387 		spec->type = EFX_FILTER_MAC_FULL;
388 		spec->data[0] = vid;
389 	}
390 
391 	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
392 	spec->data[2] = addr[0] << 8 | addr[1];
393 	return 0;
394 }
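
/* A worked example of the encoding above (editorial; the address is an
 * illustration only): addr = 00:0f:53:01:02:03 with vid = 10 gives
 *
 *	data[0] = 0x0000000a	(VLAN ID; 0 for EFX_FILTER_MAC_WILD)
 *	data[1] = 0x53010203	(low four bytes of the MAC address)
 *	data[2] = 0x0000000f	(high two bytes of the MAC address)
 *
 * efx_filter_get_eth_local() below performs the reverse unpacking.
 */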
395 
396 /**
397  * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
398  * @spec: Specification to initialise
399  */
400 int efx_filter_set_uc_def(struct efx_filter_spec *spec)
401 {
402 	EFX_BUG_ON_PARANOID(!(spec->flags &
403 			      (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
404 
405 	if (spec->type != EFX_FILTER_UNSPEC)
406 		return -EINVAL;
407 
408 	spec->type = EFX_FILTER_UC_DEF;
409 	memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
410 	return 0;
411 }
412 
413 /**
414  * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
415  * @spec: Specification to initialise
416  */
417 int efx_filter_set_mc_def(struct efx_filter_spec *spec)
418 {
419 	EFX_BUG_ON_PARANOID(!(spec->flags &
420 			      (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
421 
422 	if (spec->type != EFX_FILTER_UNSPEC)
423 		return -EINVAL;
424 
425 	spec->type = EFX_FILTER_MC_DEF;
426 	memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
427 	return 0;
428 }
429 
430 static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
431 {
432 	struct efx_filter_state *state = efx->filter_state;
433 	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
434 	struct efx_filter_spec *spec = &table->spec[filter_idx];
435 	enum efx_filter_flags flags = 0;
436 
437 	/* If there's only one channel then disable RSS for non-VF
438 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
439 	 */
440 	if (efx->n_rx_channels > 1)
441 		flags |= EFX_FILTER_FLAG_RX_RSS;
442 
443 	if (efx->rx_scatter)
444 		flags |= EFX_FILTER_FLAG_RX_SCATTER;
445 
446 	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
447 	spec->type = EFX_FILTER_UC_DEF + filter_idx;
448 	table->used_bitmap[0] |= 1 << filter_idx;
449 }
450 
451 int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
452 			     u16 *vid, u8 *addr)
453 {
454 	switch (spec->type) {
455 	case EFX_FILTER_MAC_WILD:
456 		*vid = EFX_FILTER_VID_UNSPEC;
457 		break;
458 	case EFX_FILTER_MAC_FULL:
459 		*vid = spec->data[0];
460 		break;
461 	default:
462 		return -EINVAL;
463 	}
464 
465 	addr[0] = spec->data[2] >> 8;
466 	addr[1] = spec->data[2];
467 	addr[2] = spec->data[1] >> 24;
468 	addr[3] = spec->data[1] >> 16;
469 	addr[4] = spec->data[1] >> 8;
470 	addr[5] = spec->data[1];
471 	return 0;
472 }
473 
474 /* Build a filter entry and return its n-tuple key. */
475 static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
476 {
477 	u32 data3;
478 
479 	switch (efx_filter_spec_table_id(spec)) {
480 	case EFX_FILTER_TABLE_RX_IP: {
481 		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
482 			       spec->type == EFX_FILTER_UDP_WILD);
483 		EFX_POPULATE_OWORD_7(
484 			*filter,
485 			FRF_BZ_RSS_EN,
486 			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
487 			FRF_BZ_SCATTER_EN,
488 			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
489 			FRF_BZ_TCP_UDP, is_udp,
490 			FRF_BZ_RXQ_ID, spec->dmaq_id,
491 			EFX_DWORD_2, spec->data[2],
492 			EFX_DWORD_1, spec->data[1],
493 			EFX_DWORD_0, spec->data[0]);
494 		data3 = is_udp;
495 		break;
496 	}
497 
498 	case EFX_FILTER_TABLE_RX_MAC: {
499 		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
500 		EFX_POPULATE_OWORD_7(
501 			*filter,
502 			FRF_CZ_RMFT_RSS_EN,
503 			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
504 			FRF_CZ_RMFT_SCATTER_EN,
505 			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
506 			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
507 			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
508 			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
509 			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
510 			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
511 		data3 = is_wild;
512 		break;
513 	}
514 
515 	case EFX_FILTER_TABLE_TX_MAC: {
516 		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
517 		EFX_POPULATE_OWORD_5(*filter,
518 				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
519 				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
520 				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
521 				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
522 				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
523 		data3 = is_wild | spec->dmaq_id << 1;
524 		break;
525 	}
526 
527 	default:
528 		BUG();
529 	}
530 
531 	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
532 }
533 
534 static bool efx_filter_equal(const struct efx_filter_spec *left,
535 			     const struct efx_filter_spec *right)
536 {
537 	if (left->type != right->type ||
538 	    memcmp(left->data, right->data, sizeof(left->data)))
539 		return false;
540 
541 	if (left->flags & EFX_FILTER_FLAG_TX &&
542 	    left->dmaq_id != right->dmaq_id)
543 		return false;
544 
545 	return true;
546 }
547 
548 /*
549  * Construct/deconstruct external filter IDs.  At least the RX filter
550  * IDs must be ordered by matching priority, for RX NFC semantics.
551  *
552  * Deconstruction needs to be robust against invalid IDs so that
553  * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
554  * accept user-provided IDs.
555  */
556 
557 #define EFX_FILTER_MATCH_PRI_COUNT	5
558 
559 static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
560 	[EFX_FILTER_TCP_FULL]	= 0,
561 	[EFX_FILTER_UDP_FULL]	= 0,
562 	[EFX_FILTER_TCP_WILD]	= 1,
563 	[EFX_FILTER_UDP_WILD]	= 1,
564 	[EFX_FILTER_MAC_FULL]	= 2,
565 	[EFX_FILTER_MAC_WILD]	= 3,
566 	[EFX_FILTER_UC_DEF]	= 4,
567 	[EFX_FILTER_MC_DEF]	= 4,
568 };
569 
570 static const enum efx_filter_table_id efx_filter_range_table[] = {
571 	EFX_FILTER_TABLE_RX_IP,		/* RX match pri 0 */
572 	EFX_FILTER_TABLE_RX_IP,
573 	EFX_FILTER_TABLE_RX_MAC,
574 	EFX_FILTER_TABLE_RX_MAC,
575 	EFX_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
576 	EFX_FILTER_TABLE_COUNT,		/* TX match pri 0; invalid */
577 	EFX_FILTER_TABLE_COUNT,		/* invalid */
578 	EFX_FILTER_TABLE_TX_MAC,
579 	EFX_FILTER_TABLE_TX_MAC,	/* TX match pri 3 */
580 };
581 
582 #define EFX_FILTER_INDEX_WIDTH	13
583 #define EFX_FILTER_INDEX_MASK	((1 << EFX_FILTER_INDEX_WIDTH) - 1)
584 
585 static inline u32
586 efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
587 {
588 	unsigned int range;
589 
590 	range = efx_filter_type_match_pri[spec->type];
591 	if (!(spec->flags & EFX_FILTER_FLAG_RX))
592 		range += EFX_FILTER_MATCH_PRI_COUNT;
593 
594 	return range << EFX_FILTER_INDEX_WIDTH | index;
595 }
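
/* Worked examples of the ID layout (editorial): an RX TCP wildcard
 * filter (match priority 1) at table index 5 gets
 *
 *	id = 1 << 13 | 5 = 8197
 *
 * while a TX MAC wildcard filter (match priority 3, pushed into the TX
 * range by adding EFX_FILTER_MATCH_PRI_COUNT) at index 5 gets
 *
 *	id = (3 + 5) << 13 | 5 = 65541
 *
 * The helpers below recover the table, index and RX/TX flag from such
 * an ID without trusting it to be in range.
 */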
596 
597 static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
598 {
599 	unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
600 
601 	if (range < ARRAY_SIZE(efx_filter_range_table))
602 		return efx_filter_range_table[range];
603 	else
604 		return EFX_FILTER_TABLE_COUNT; /* invalid */
605 }
606 
607 static inline unsigned int efx_filter_id_index(u32 id)
608 {
609 	return id & EFX_FILTER_INDEX_MASK;
610 }
611 
612 static inline u8 efx_filter_id_flags(u32 id)
613 {
614 	unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
615 
616 	if (range < EFX_FILTER_MATCH_PRI_COUNT)
617 		return EFX_FILTER_FLAG_RX;
618 	else
619 		return EFX_FILTER_FLAG_TX;
620 }
621 
622 u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
623 {
624 	struct efx_filter_state *state = efx->filter_state;
625 	unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
626 	enum efx_filter_table_id table_id;
627 
628 	do {
629 		table_id = efx_filter_range_table[range];
630 		if (state->table[table_id].size != 0)
631 			return range << EFX_FILTER_INDEX_WIDTH |
632 				state->table[table_id].size;
633 	} while (range--);
634 
635 	return 0;
636 }
637 
638 /**
639  * efx_filter_insert_filter - add or replace a filter
640  * @efx: NIC in which to insert the filter
641  * @spec: Specification for the filter
642  * @replace_equal: Flag for whether the specified filter may replace an
643  *	existing filter with equal priority
644  *
645  * On success, return the filter ID.
646  * On failure, return a negative error code.
647  *
648  * If an existing filter has equal match values to the new filter
649  * spec, then the new filter might replace it, depending on the
650  * relative priorities.  If the existing filter has lower priority, or
651  * if @replace_equal is set and it has equal priority, then it is
652  * replaced.  Otherwise the function fails, returning -%EPERM if
653  * the existing filter has higher priority or -%EEXIST if it has
654  * equal priority.
655  */
656 s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
657 			     bool replace_equal)
658 {
659 	struct efx_filter_state *state = efx->filter_state;
660 	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
661 	efx_oword_t filter;
662 	int rep_index, ins_index;
663 	unsigned int depth = 0;
664 	int rc;
665 
666 	if (!table || table->size == 0)
667 		return -EINVAL;
668 
669 	netif_vdbg(efx, hw, efx->net_dev,
670 		   "%s: type %d search_depth=%d", __func__, spec->type,
671 		   table->search_depth[spec->type]);
672 
673 	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
674 		/* One filter spec per type */
675 		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
676 		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
677 			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
678 		rep_index = spec->type - EFX_FILTER_UC_DEF;
679 		ins_index = rep_index;
680 
681 		spin_lock_bh(&state->lock);
682 	} else {
683 		/* Search concurrently for
684 		 * (1) a filter to be replaced (rep_index): any filter
685 		 *     with the same match values, up to the current
686 		 *     search depth for this type, and
687 		 * (2) the insertion point (ins_index): (1) or any
688 		 *     free slot before it or up to the maximum search
689 		 *     depth for this priority
690 		 * We fail if we cannot find (2).
691 		 *
692 		 * We can stop once either
693 		 * (a) we find (1), in which case we have definitely
694 		 *     found (2) as well; or
695 		 * (b) we have searched exhaustively for (1), and have
696 		 *     either found (2) or searched exhaustively for it
697 		 */
698 		u32 key = efx_filter_build(&filter, spec);
699 		unsigned int hash = efx_filter_hash(key);
700 		unsigned int incr = efx_filter_increment(key);
701 		unsigned int max_rep_depth = table->search_depth[spec->type];
702 		unsigned int max_ins_depth =
703 			spec->priority <= EFX_FILTER_PRI_HINT ?
704 			FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
705 		unsigned int i = hash & (table->size - 1);
706 
707 		ins_index = -1;
708 		depth = 1;
709 
710 		spin_lock_bh(&state->lock);
711 
712 		for (;;) {
713 			if (!test_bit(i, table->used_bitmap)) {
714 				if (ins_index < 0)
715 					ins_index = i;
716 			} else if (efx_filter_equal(spec, &table->spec[i])) {
717 				/* Case (a) */
718 				if (ins_index < 0)
719 					ins_index = i;
720 				rep_index = i;
721 				break;
722 			}
723 
724 			if (depth >= max_rep_depth &&
725 			    (ins_index >= 0 || depth >= max_ins_depth)) {
726 				/* Case (b) */
727 				if (ins_index < 0) {
728 					rc = -EBUSY;
729 					goto out;
730 				}
731 				rep_index = -1;
732 				break;
733 			}
734 
735 			i = (i + incr) & (table->size - 1);
736 			++depth;
737 		}
738 	}
739 
740 	/* If we found a filter to be replaced, check whether we
741 	 * should do so
742 	 */
743 	if (rep_index >= 0) {
744 		struct efx_filter_spec *saved_spec = &table->spec[rep_index];
745 
746 		if (spec->priority == saved_spec->priority && !replace_equal) {
747 			rc = -EEXIST;
748 			goto out;
749 		}
750 		if (spec->priority < saved_spec->priority) {
751 			rc = -EPERM;
752 			goto out;
753 		}
754 	}
755 
756 	/* Insert the filter */
757 	if (ins_index != rep_index) {
758 		__set_bit(ins_index, table->used_bitmap);
759 		++table->used;
760 	}
761 	table->spec[ins_index] = *spec;
762 
763 	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
764 		efx_filter_push_rx_config(efx);
765 	} else {
766 		if (table->search_depth[spec->type] < depth) {
767 			table->search_depth[spec->type] = depth;
768 			if (spec->flags & EFX_FILTER_FLAG_TX)
769 				efx_filter_push_tx_limits(efx);
770 			else
771 				efx_filter_push_rx_config(efx);
772 		}
773 
774 		efx_writeo(efx, &filter,
775 			   table->offset + table->step * ins_index);
776 
777 		/* If we were able to replace a filter by inserting
778 		 * at a lower depth, clear the replaced filter
779 		 */
780 		if (ins_index != rep_index && rep_index >= 0)
781 			efx_filter_table_clear_entry(efx, table, rep_index);
782 	}
783 
784 	netif_vdbg(efx, hw, efx->net_dev,
785 		   "%s: filter type %d index %d rxq %u set",
786 		   __func__, spec->type, ins_index, spec->dmaq_id);
787 	rc = efx_filter_make_id(spec, ins_index);
788 
789 out:
790 	spin_unlock_bh(&state->lock);
791 	return rc;
792 }
793 
794 static void efx_filter_table_clear_entry(struct efx_nic *efx,
795 					 struct efx_filter_table *table,
796 					 unsigned int filter_idx)
797 {
798 	static efx_oword_t filter;
799 
800 	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
801 		/* RX default filters must always exist */
802 		efx_filter_reset_rx_def(efx, filter_idx);
803 		efx_filter_push_rx_config(efx);
804 	} else if (test_bit(filter_idx, table->used_bitmap)) {
805 		__clear_bit(filter_idx, table->used_bitmap);
806 		--table->used;
807 		memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
808 
809 		efx_writeo(efx, &filter,
810 			   table->offset + table->step * filter_idx);
811 	}
812 }
813 
814 /**
815  * efx_filter_remove_id_safe - remove a filter by ID, carefully
816  * @efx: NIC from which to remove the filter
817  * @priority: Priority of filter, as passed to @efx_filter_insert_filter
818  * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
819  *
820  * This function will range-check @filter_id, so it is safe to call
821  * with a value passed from userland.
822  */
823 int efx_filter_remove_id_safe(struct efx_nic *efx,
824 			      enum efx_filter_priority priority,
825 			      u32 filter_id)
826 {
827 	struct efx_filter_state *state = efx->filter_state;
828 	enum efx_filter_table_id table_id;
829 	struct efx_filter_table *table;
830 	unsigned int filter_idx;
831 	struct efx_filter_spec *spec;
832 	u8 filter_flags;
833 	int rc;
834 
835 	table_id = efx_filter_id_table_id(filter_id);
836 	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
837 		return -ENOENT;
838 	table = &state->table[table_id];
839 
840 	filter_idx = efx_filter_id_index(filter_id);
841 	if (filter_idx >= table->size)
842 		return -ENOENT;
843 	spec = &table->spec[filter_idx];
844 
845 	filter_flags = efx_filter_id_flags(filter_id);
846 
847 	spin_lock_bh(&state->lock);
848 
849 	if (test_bit(filter_idx, table->used_bitmap) &&
850 	    spec->priority == priority) {
851 		efx_filter_table_clear_entry(efx, table, filter_idx);
852 		if (table->used == 0)
853 			efx_filter_table_reset_search_depth(table);
854 		rc = 0;
855 	} else {
856 		rc = -ENOENT;
857 	}
858 
859 	spin_unlock_bh(&state->lock);
860 
861 	return rc;
862 }
863 
864 /**
865  * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
866  * @efx: NIC from which to retrieve the filter
867  * @priority: Priority of filter, as passed to @efx_filter_insert_filter
868  * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
869  * @spec: Buffer in which to store filter specification
870  *
871  * This function will range-check @filter_id, so it is safe to call
872  * with a value passed from userland.
873  */
874 int efx_filter_get_filter_safe(struct efx_nic *efx,
875 			       enum efx_filter_priority priority,
876 			       u32 filter_id, struct efx_filter_spec *spec_buf)
877 {
878 	struct efx_filter_state *state = efx->filter_state;
879 	enum efx_filter_table_id table_id;
880 	struct efx_filter_table *table;
881 	struct efx_filter_spec *spec;
882 	unsigned int filter_idx;
883 	u8 filter_flags;
884 	int rc;
885 
886 	table_id = efx_filter_id_table_id(filter_id);
887 	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
888 		return -ENOENT;
889 	table = &state->table[table_id];
890 
891 	filter_idx = efx_filter_id_index(filter_id);
892 	if (filter_idx >= table->size)
893 		return -ENOENT;
894 	spec = &table->spec[filter_idx];
895 
896 	filter_flags = efx_filter_id_flags(filter_id);
897 
898 	spin_lock_bh(&state->lock);
899 
900 	if (test_bit(filter_idx, table->used_bitmap) &&
901 	    spec->priority == priority) {
902 		*spec_buf = *spec;
903 		rc = 0;
904 	} else {
905 		rc = -ENOENT;
906 	}
907 
908 	spin_unlock_bh(&state->lock);
909 
910 	return rc;
911 }
912 
913 static void efx_filter_table_clear(struct efx_nic *efx,
914 				   enum efx_filter_table_id table_id,
915 				   enum efx_filter_priority priority)
916 {
917 	struct efx_filter_state *state = efx->filter_state;
918 	struct efx_filter_table *table = &state->table[table_id];
919 	unsigned int filter_idx;
920 
921 	spin_lock_bh(&state->lock);
922 
923 	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
924 		if (table->spec[filter_idx].priority <= priority)
925 			efx_filter_table_clear_entry(efx, table, filter_idx);
926 	if (table->used == 0)
927 		efx_filter_table_reset_search_depth(table);
928 
929 	spin_unlock_bh(&state->lock);
930 }
931 
932 /**
933  * efx_filter_clear_rx - remove RX filters by priority
934  * @efx: NIC from which to remove the filters
935  * @priority: Maximum priority to remove
936  */
937 void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
938 {
939 	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
940 	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
941 }
942 
943 u32 efx_filter_count_rx_used(struct efx_nic *efx,
944 			     enum efx_filter_priority priority)
945 {
946 	struct efx_filter_state *state = efx->filter_state;
947 	enum efx_filter_table_id table_id;
948 	struct efx_filter_table *table;
949 	unsigned int filter_idx;
950 	u32 count = 0;
951 
952 	spin_lock_bh(&state->lock);
953 
954 	for (table_id = EFX_FILTER_TABLE_RX_IP;
955 	     table_id <= EFX_FILTER_TABLE_RX_DEF;
956 	     table_id++) {
957 		table = &state->table[table_id];
958 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
959 			if (test_bit(filter_idx, table->used_bitmap) &&
960 			    table->spec[filter_idx].priority == priority)
961 				++count;
962 		}
963 	}
964 
965 	spin_unlock_bh(&state->lock);
966 
967 	return count;
968 }
969 
970 s32 efx_filter_get_rx_ids(struct efx_nic *efx,
971 			  enum efx_filter_priority priority,
972 			  u32 *buf, u32 size)
973 {
974 	struct efx_filter_state *state = efx->filter_state;
975 	enum efx_filter_table_id table_id;
976 	struct efx_filter_table *table;
977 	unsigned int filter_idx;
978 	s32 count = 0;
979 
980 	spin_lock_bh(&state->lock);
981 
982 	for (table_id = EFX_FILTER_TABLE_RX_IP;
983 	     table_id <= EFX_FILTER_TABLE_RX_DEF;
984 	     table_id++) {
985 		table = &state->table[table_id];
986 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
987 			if (test_bit(filter_idx, table->used_bitmap) &&
988 			    table->spec[filter_idx].priority == priority) {
989 				if (count == size) {
990 					count = -EMSGSIZE;
991 					goto out;
992 				}
993 				buf[count++] = efx_filter_make_id(
994 					&table->spec[filter_idx], filter_idx);
995 			}
996 		}
997 	}
998 out:
999 	spin_unlock_bh(&state->lock);
1000 
1001 	return count;
1002 }
1003 
1004 /* Restore filter state after reset */
1005 void efx_restore_filters(struct efx_nic *efx)
1006 {
1007 	struct efx_filter_state *state = efx->filter_state;
1008 	enum efx_filter_table_id table_id;
1009 	struct efx_filter_table *table;
1010 	efx_oword_t filter;
1011 	unsigned int filter_idx;
1012 
1013 	spin_lock_bh(&state->lock);
1014 
1015 	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1016 		table = &state->table[table_id];
1017 
1018 		/* Check whether this is a regular register table */
1019 		if (table->step == 0)
1020 			continue;
1021 
1022 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1023 			if (!test_bit(filter_idx, table->used_bitmap))
1024 				continue;
1025 			efx_filter_build(&filter, &table->spec[filter_idx]);
1026 			efx_writeo(efx, &filter,
1027 				   table->offset + table->step * filter_idx);
1028 		}
1029 	}
1030 
1031 	efx_filter_push_rx_config(efx);
1032 	efx_filter_push_tx_limits(efx);
1033 
1034 	spin_unlock_bh(&state->lock);
1035 }
1036 
1037 int efx_probe_filters(struct efx_nic *efx)
1038 {
1039 	struct efx_filter_state *state;
1040 	struct efx_filter_table *table;
1041 	unsigned table_id;
1042 
1043 	state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
1044 	if (!state)
1045 		return -ENOMEM;
1046 	efx->filter_state = state;
1047 
1048 	spin_lock_init(&state->lock);
1049 
1050 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1051 #ifdef CONFIG_RFS_ACCEL
1052 		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
1053 					     sizeof(*state->rps_flow_id),
1054 					     GFP_KERNEL);
1055 		if (!state->rps_flow_id)
1056 			goto fail;
1057 #endif
1058 		table = &state->table[EFX_FILTER_TABLE_RX_IP];
1059 		table->id = EFX_FILTER_TABLE_RX_IP;
1060 		table->offset = FR_BZ_RX_FILTER_TBL0;
1061 		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
1062 		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
1063 	}
1064 
1065 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1066 		table = &state->table[EFX_FILTER_TABLE_RX_MAC];
1067 		table->id = EFX_FILTER_TABLE_RX_MAC;
1068 		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
1069 		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
1070 		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1071 
1072 		table = &state->table[EFX_FILTER_TABLE_RX_DEF];
1073 		table->id = EFX_FILTER_TABLE_RX_DEF;
1074 		table->size = EFX_FILTER_SIZE_RX_DEF;
1075 
1076 		table = &state->table[EFX_FILTER_TABLE_TX_MAC];
1077 		table->id = EFX_FILTER_TABLE_TX_MAC;
1078 		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1079 		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1080 		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
1081 	}
1082 
1083 	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1084 		table = &state->table[table_id];
1085 		if (table->size == 0)
1086 			continue;
1087 		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
1088 					     sizeof(unsigned long),
1089 					     GFP_KERNEL);
1090 		if (!table->used_bitmap)
1091 			goto fail;
1092 		table->spec = vzalloc(table->size * sizeof(*table->spec));
1093 		if (!table->spec)
1094 			goto fail;
1095 	}
1096 
1097 	if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
1098 		/* RX default filters must always exist */
1099 		unsigned i;
1100 		for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
1101 			efx_filter_reset_rx_def(efx, i);
1102 	}
1103 
1104 	efx_filter_push_rx_config(efx);
1105 
1106 	return 0;
1107 
1108 fail:
1109 	efx_remove_filters(efx);
1110 	return -ENOMEM;
1111 }
1112 
1113 void efx_remove_filters(struct efx_nic *efx)
1114 {
1115 	struct efx_filter_state *state = efx->filter_state;
1116 	enum efx_filter_table_id table_id;
1117 
1118 	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1119 		kfree(state->table[table_id].used_bitmap);
1120 		vfree(state->table[table_id].spec);
1121 	}
1122 #ifdef CONFIG_RFS_ACCEL
1123 	kfree(state->rps_flow_id);
1124 #endif
1125 	kfree(state);
1126 }
1127 
1128 /* Update scatter enable flags for filters pointing to our own RX queues */
1129 void efx_filter_update_rx_scatter(struct efx_nic *efx)
1130 {
1131 	struct efx_filter_state *state = efx->filter_state;
1132 	enum efx_filter_table_id table_id;
1133 	struct efx_filter_table *table;
1134 	efx_oword_t filter;
1135 	unsigned int filter_idx;
1136 
1137 	spin_lock_bh(&state->lock);
1138 
1139 	for (table_id = EFX_FILTER_TABLE_RX_IP;
1140 	     table_id <= EFX_FILTER_TABLE_RX_DEF;
1141 	     table_id++) {
1142 		table = &state->table[table_id];
1143 
1144 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1145 			if (!test_bit(filter_idx, table->used_bitmap) ||
1146 			    table->spec[filter_idx].dmaq_id >=
1147 			    efx->n_rx_channels)
1148 				continue;
1149 
1150 			if (efx->rx_scatter)
1151 				table->spec[filter_idx].flags |=
1152 					EFX_FILTER_FLAG_RX_SCATTER;
1153 			else
1154 				table->spec[filter_idx].flags &=
1155 					~EFX_FILTER_FLAG_RX_SCATTER;
1156 
1157 			if (table_id == EFX_FILTER_TABLE_RX_DEF)
1158 				/* Pushed by efx_filter_push_rx_config() */
1159 				continue;
1160 
1161 			efx_filter_build(&filter, &table->spec[filter_idx]);
1162 			efx_writeo(efx, &filter,
1163 				   table->offset + table->step * filter_idx);
1164 		}
1165 	}
1166 
1167 	efx_filter_push_rx_config(efx);
1168 
1169 	spin_unlock_bh(&state->lock);
1170 }
1171 
1172 #ifdef CONFIG_RFS_ACCEL
1173 
1174 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1175 		   u16 rxq_index, u32 flow_id)
1176 {
1177 	struct efx_nic *efx = netdev_priv(net_dev);
1178 	struct efx_channel *channel;
1179 	struct efx_filter_state *state = efx->filter_state;
1180 	struct efx_filter_spec spec;
1181 	const struct iphdr *ip;
1182 	const __be16 *ports;
1183 	int nhoff;
1184 	int rc;
1185 
1186 	nhoff = skb_network_offset(skb);
1187 
1188 	if (skb->protocol != htons(ETH_P_IP))
1189 		return -EPROTONOSUPPORT;
1190 
1191 	/* RFS must validate the IP header length before calling us */
1192 	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1193 	ip = (const struct iphdr *)(skb->data + nhoff);
1194 	if (ip_is_fragment(ip))
1195 		return -EPROTONOSUPPORT;
1196 	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1197 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1198 
1199 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
1200 	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1201 				      ip->daddr, ports[1], ip->saddr, ports[0]);
1202 	if (rc)
1203 		return rc;
1204 
1205 	rc = efx_filter_insert_filter(efx, &spec, true);
1206 	if (rc < 0)
1207 		return rc;
1208 
1209 	/* Remember this so we can check whether to expire the filter later */
1210 	state->rps_flow_id[rc] = flow_id;
1211 	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1212 	++channel->rfs_filters_added;
1213 
1214 	netif_info(efx, rx_status, efx->net_dev,
1215 		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1216 		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1217 		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1218 		   rxq_index, flow_id, rc);
1219 
1220 	return rc;
1221 }
1222 
1223 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1224 {
1225 	struct efx_filter_state *state = efx->filter_state;
1226 	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
1227 	unsigned mask = table->size - 1;
1228 	unsigned index;
1229 	unsigned stop;
1230 
1231 	if (!spin_trylock_bh(&state->lock))
1232 		return false;
1233 
1234 	index = state->rps_expire_index;
1235 	stop = (index + quota) & mask;
1236 
1237 	while (index != stop) {
1238 		if (test_bit(index, table->used_bitmap) &&
1239 		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1240 		    rps_may_expire_flow(efx->net_dev,
1241 					table->spec[index].dmaq_id,
1242 					state->rps_flow_id[index], index)) {
1243 			netif_info(efx, rx_status, efx->net_dev,
1244 				   "expiring filter %d [flow %u]\n",
1245 				   index, state->rps_flow_id[index]);
1246 			efx_filter_table_clear_entry(efx, table, index);
1247 		}
1248 		index = (index + 1) & mask;
1249 	}
1250 
1251 	state->rps_expire_index = stop;
1252 	if (table->used == 0)
1253 		efx_filter_table_reset_search_depth(table);
1254 
1255 	spin_unlock_bh(&state->lock);
1256 	return true;
1257 }
1258 
1259 #endif /* CONFIG_RFS_ACCEL */
1260