• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
#define ICE_ETH_DA_OFFSET		0	/* dest MAC offset in dummy eth hdr */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* EtherType offset (untagged hdr) */
#define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI offset (tagged hdr) */
#define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID		0x86DD	/* EtherType value for IPv6 */
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
#define DUMMY_ETH_HDR_LEN		16
/* byte meanings are described in the "Word on Hardcoded values" comment above */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};
32 
/* Flags describing the headers a dummy packet template contains; these are
 * combined into ice_dummy_pkt_profile::match (see ICE_PKT_PROFILE) to pick
 * the template matching a requested filter.
 */
enum {
	ICE_PKT_OUTER_IPV6	= BIT(0),	/* outer L3 is IPv6, not IPv4 */
	ICE_PKT_TUN_GTPC	= BIT(1),
	ICE_PKT_TUN_GTPU	= BIT(2),
	ICE_PKT_TUN_NVGRE	= BIT(3),
	ICE_PKT_TUN_UDP		= BIT(4),	/* UDP-based tunnel (VXLAN/GENEVE) */
	ICE_PKT_INNER_IPV6	= BIT(5),
	ICE_PKT_INNER_TCP	= BIT(6),
	ICE_PKT_INNER_UDP	= BIT(7),
	ICE_PKT_GTP_NOPAY	= BIT(8),	/* GTP header with no payload */
	ICE_PKT_KMALLOC		= BIT(9),
	ICE_PKT_PPPOE		= BIT(10),
	ICE_PKT_L2TPV3		= BIT(11),
};
47 
/* One (protocol, byte offset) entry locating a header inside a dummy packet */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
52 
/* Ties a dummy packet byte template to its offset table and the set of
 * ICE_PKT_* flags it satisfies (initialized via ICE_PKT_PROFILE).
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;	/* header locations */
	const u8 *pkt;					/* raw template bytes */
	u32 match;					/* ICE_PKT_* flag mask */
	u16 pkt_len;					/* sizeof template in bytes */
	u16 offsets_len;				/* sizeof offsets array in bytes */
};
60 
/* Declares the offset-descriptor table for dummy packet "type" */
#define ICE_DECLARE_PKT_OFFSETS(type)					\
	static const struct ice_dummy_pkt_offsets			\
	ice_dummy_##type##_packet_offsets[]

/* Declares the raw byte template for dummy packet "type" */
#define ICE_DECLARE_PKT_TEMPLATE(type)					\
	static const u8 ice_dummy_##type##_packet[]

/* Builds an ice_dummy_pkt_profile initializer binding the template and
 * offsets declared with the two macros above to match mask "m".
 */
#define ICE_PKT_PROFILE(type, m) {					\
	.match		= (m),						\
	.pkt		= ice_dummy_##type##_packet,			\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),		\
	.offsets	= ice_dummy_##type##_packet_offsets,		\
	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
}
75 
/* Single 802.1Q VLAN tag fragment at the EtherType position (byte 12).
 * NOTE(review): no ICE_PROTOCOL_LAST terminator — presumably this fragment
 * is spliced into a base packet rather than used standalone; verify callers.
 */
ICE_DECLARE_PKT_OFFSETS(vlan) = {
	{ ICE_VLAN_OFOS,        12 },
};

ICE_DECLARE_PKT_TEMPLATE(vlan) = {
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
};
83 
/* QinQ (double VLAN) fragment: outer tag EtherType 0x9100, inner 0x8100.
 * NOTE(review): like the vlan fragment above, no ICE_PROTOCOL_LAST here.
 */
ICE_DECLARE_PKT_OFFSETS(qinq) = {
	{ ICE_VLAN_EX,          12 },
	{ ICE_VLAN_IN,          16 },
};

ICE_DECLARE_PKT_TEMPLATE(qinq) = {
	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
};
93 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + inner TCP */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
140 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + inner UDP */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
184 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share byte 42) + inner
 * MAC + inner IPv4 + inner TCP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
237 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share byte 42) + inner
 * MAC + inner IPv4 + inner UDP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
287 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + inner TCP */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,	/* next header 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
339 
/* MAC + IPv4 + NVGRE + inner MAC + inner IPv6 + inner UDP */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
388 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share byte 42) + inner
 * MAC + inner IPv6 + inner TCP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,	/* next header 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
446 
/* MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share byte 42) + inner
 * MAC + inner IPv6 + inner UDP
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x40,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
501 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
530 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
562 
/* MAC + IPv6 + TCP */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
597 
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
634 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
682 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
727 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP
 * (comment previously said outer IPv6/inner IPv4, which contradicted the
 * offsets table: ICE_IPV4_OFOS at 14, ICE_IPV6_IL at 62)
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
780 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
829 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
881 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
930 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
987 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1041 
/* Outer IPv4 + Outer UDP + GTP (no-payload match) — the template still
 * carries an inner IPv4 header after the GTP extension, but only headers
 * up to ICE_GTP_NO_PAY are described in the offsets.
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP_NO_PAY 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};
1079 
/* Outer IPv6 + Outer UDP + GTP (no-payload match) */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP_NO_PAY 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};
1113 
/* MAC + PPPoE session + PPP (IPv4) + IPv4 + TCP */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_TCP_IL,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1149 
/* Header offsets inside the pppoe_ipv4_udp dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: MAC + PPPoE session (ethertype 0x8864) + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 (0x0021 = IPv4) */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* IP protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1182 
/* Header offsets inside the pppoe_ipv6_tcp dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: MAC + PPPoE session (ethertype 0x8864) + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 (0x0057 = IPv6) */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1223 
/* Header offsets inside the pppoe_ipv6_udp dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: MAC + PPPoE session (ethertype 0x8864) + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 (0x0057 = IPv6) */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1261 
/* Header offsets inside the ipv4_l2tpv3 dummy packet below */
ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: MAC + IPv4 (protocol 0x73 = L2TPv3 over IP) + L2TPv3 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 (offset list uses OFOS) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1288 
/* Header offsets inside the ipv6_l2tpv3 dummy packet below */
ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: MAC + IPv6 (next header 0x73 = L2TPv3 over IP) + L2TPv3 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (offset list uses OFOS) */
	0x00, 0x0c, 0x73, 0x40, /* next header 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1320 
/* Table associating each dummy packet template with the set of ICE_PKT_*
 * match flags it covers. Entries are ordered from most-specific flag
 * combinations down to the bare "tcp" entry (match == 0); presumably the
 * lookup takes the first entry whose match bits are satisfied — the
 * selection code is elsewhere in this file.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	/* ipv6_gtp serves both GTP-U (above) and GTP-C (here) */
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};
1371 
/* Overflow-safe sizes (via struct_size()) for the variable-length AQ
 * buffers: switch rules with a trailing header, large actions, VSI lists.
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1387 
1388 /**
1389  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1390  * @hw: pointer to the HW struct
1391  *
1392  * Allocate memory for the entire recipe table and initialize the structures/
1393  * entries corresponding to basic recipes.
1394  */
ice_init_def_sw_recp(struct ice_hw * hw)1395 int ice_init_def_sw_recp(struct ice_hw *hw)
1396 {
1397 	struct ice_sw_recipe *recps;
1398 	u8 i;
1399 
1400 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1401 			     sizeof(*recps), GFP_KERNEL);
1402 	if (!recps)
1403 		return -ENOMEM;
1404 
1405 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1406 		recps[i].root_rid = i;
1407 		INIT_LIST_HEAD(&recps[i].filt_rules);
1408 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1409 		INIT_LIST_HEAD(&recps[i].rg_list);
1410 		mutex_init(&recps[i].filt_rule_lock);
1411 	}
1412 
1413 	hw->switch_info->recp_list = recps;
1414 
1415 	return 0;
1416 }
1417 
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* FW echoes back the continuation cookie and element count */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}
1464 
/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210).
 *
 * On success, the firmware-assigned VSI number and the used/free VSI counts
 * are written back into @vsi_ctx.
 */
static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* pass an explicit VSI number (marked valid) only when the caller is
	 * not allocating from the shared pool
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	/* the vsi_ctx->info buffer is read by firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}
1507 
/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213).
 *
 * On success, the used/free VSI counts reported by firmware are written
 * back into @vsi_ctx.
 */
static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}
1543 
/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211).
 *
 * On success, the used/free VSI counts reported by firmware are written
 * back into @vsi_ctx.
 */
static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	/* the vsi_ctx->info buffer is read by firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}
1580 
1581 /**
1582  * ice_is_vsi_valid - check whether the VSI is valid or not
1583  * @hw: pointer to the HW struct
1584  * @vsi_handle: VSI handle
1585  *
1586  * check whether the VSI is valid or not
1587  */
ice_is_vsi_valid(struct ice_hw * hw,u16 vsi_handle)1588 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1589 {
1590 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1591 }
1592 
/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Return: the HW VSI number of the saved context.
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid);
 * the handle is not range-checked here.
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}
1605 
1606 /**
1607  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1608  * @hw: pointer to the HW struct
1609  * @vsi_handle: VSI handle
1610  *
1611  * return the VSI context entry for a given VSI handle
1612  */
ice_get_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)1613 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1614 {
1615 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1616 }
1617 
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * Save the VSI context entry for a given VSI handle. The handle is not
 * range-checked here; callers validate it first.
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
1631 
1632 /**
1633  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1634  * @hw: pointer to the HW struct
1635  * @vsi_handle: VSI handle
1636  */
ice_clear_vsi_q_ctx(struct ice_hw * hw,u16 vsi_handle)1637 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1638 {
1639 	struct ice_vsi_ctx *vsi;
1640 	u8 i;
1641 
1642 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1643 	if (!vsi)
1644 		return;
1645 	ice_for_each_traffic_class(i) {
1646 		if (vsi->lan_q_ctx[i]) {
1647 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1648 			vsi->lan_q_ctx[i] = NULL;
1649 		}
1650 		if (vsi->rdma_q_ctx[i]) {
1651 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1652 			vsi->rdma_q_ctx[i] = NULL;
1653 		}
1654 	}
1655 }
1656 
1657 /**
1658  * ice_clear_vsi_ctx - clear the VSI context entry
1659  * @hw: pointer to the HW struct
1660  * @vsi_handle: VSI handle
1661  *
1662  * clear the VSI context entry
1663  */
ice_clear_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)1664 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1665 {
1666 	struct ice_vsi_ctx *vsi;
1667 
1668 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1669 	if (vsi) {
1670 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1671 		devm_kfree(ice_hw_to_dev(hw), vsi);
1672 		hw->vsi_ctx[vsi_handle] = NULL;
1673 	}
1674 }
1675 
1676 /**
1677  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1678  * @hw: pointer to the HW struct
1679  */
ice_clear_all_vsi_ctx(struct ice_hw * hw)1680 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1681 {
1682 	u16 i;
1683 
1684 	for (i = 0; i < ICE_MAX_VSI; i++)
1685 		ice_clear_vsi_ctx(hw, i);
1686 }
1687 
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 *
 * Return: 0 on success, -EINVAL for an out-of-range handle, -ENOMEM if the
 * context copy cannot be allocated, or the status of the add-VSI AQ command.
 */
int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	int status;

	if (vsi_handle >= ICE_MAX_VSI)
		return -EINVAL;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* undo the HW-side add so HW and the handle list
			 * stay consistent
			 */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return -ENOMEM;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}
1729 
1730 /**
1731  * ice_free_vsi- free VSI context from hardware and VSI handle list
1732  * @hw: pointer to the HW struct
1733  * @vsi_handle: unique VSI handle
1734  * @vsi_ctx: pointer to a VSI context struct
1735  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1736  * @cd: pointer to command details structure or NULL
1737  *
1738  * Free VSI context info from hardware as well as from VSI handle list
1739  */
1740 int
ice_free_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)1741 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1742 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1743 {
1744 	int status;
1745 
1746 	if (!ice_is_vsi_valid(hw, vsi_handle))
1747 		return -EINVAL;
1748 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1749 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1750 	if (!status)
1751 		ice_clear_vsi_ctx(hw, vsi_handle);
1752 	return status;
1753 }
1754 
/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware.
 *
 * Return: 0 on success, -EINVAL for an unknown @vsi_handle, or the status
 * of the update-VSI AQ command.
 */
int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
1773 
/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 *
 * Toggle the ICE_AQ_VSI_Q_OPT_PE_FLTR_EN bit in the VSI's queue-option
 * flags via an update-VSI command. The cached context is only refreshed
 * after the hardware update succeeds.
 *
 * Return: 0 on success, -ENOENT for an unknown handle, -ENOMEM on
 * allocation failure, or the status of the update-VSI command.
 */
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	struct ice_vsi_ctx *ctx, *cached_ctx;
	int status;

	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!cached_ctx)
		return -ENOENT;

	/* build a scratch context so the cached one is untouched if the
	 * AQ update fails
	 */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;

	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);

	if (enable)
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	else
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
	if (!status) {
		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
	}

	kfree(ctx);
	return status;
}
1814 
1815 /**
1816  * ice_aq_alloc_free_vsi_list
1817  * @hw: pointer to the HW struct
1818  * @vsi_list_id: VSI list ID returned or used for lookup
1819  * @lkup_type: switch rule filter lookup type
1820  * @opc: switch rules population command type - pass in the command opcode
1821  *
1822  * allocates or free a VSI list resource
1823  */
1824 static int
ice_aq_alloc_free_vsi_list(struct ice_hw * hw,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type,enum ice_adminq_opc opc)1825 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1826 			   enum ice_sw_lkup_type lkup_type,
1827 			   enum ice_adminq_opc opc)
1828 {
1829 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1830 	struct ice_aqc_res_elem *vsi_ele;
1831 	u16 buf_len;
1832 	int status;
1833 
1834 	buf_len = struct_size(sw_buf, elem, 1);
1835 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1836 	if (!sw_buf)
1837 		return -ENOMEM;
1838 	sw_buf->num_elems = cpu_to_le16(1);
1839 
1840 	if (lkup_type == ICE_SW_LKUP_MAC ||
1841 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1842 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1843 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1844 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1845 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1846 	    lkup_type == ICE_SW_LKUP_DFLT) {
1847 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1848 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1849 		sw_buf->res_type =
1850 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1851 	} else {
1852 		status = -EINVAL;
1853 		goto ice_aq_alloc_free_vsi_list_exit;
1854 	}
1855 
1856 	if (opc == ice_aqc_opc_free_res)
1857 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1858 
1859 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1860 	if (status)
1861 		goto ice_aq_alloc_free_vsi_list_exit;
1862 
1863 	if (opc == ice_aqc_opc_alloc_res) {
1864 		vsi_ele = &sw_buf->elem[0];
1865 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1866 	}
1867 
1868 ice_aq_alloc_free_vsi_list_exit:
1869 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1870 	return status;
1871 }
1872 
1873 /**
1874  * ice_aq_sw_rules - add/update/remove switch rules
1875  * @hw: pointer to the HW struct
1876  * @rule_list: pointer to switch rule population list
1877  * @rule_list_sz: total size of the rule list in bytes
1878  * @num_rules: number of switch rules in the rule_list
1879  * @opc: switch rules population command type - pass in the command opcode
1880  * @cd: pointer to command details structure or NULL
1881  *
1882  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1883  */
1884 int
ice_aq_sw_rules(struct ice_hw * hw,void * rule_list,u16 rule_list_sz,u8 num_rules,enum ice_adminq_opc opc,struct ice_sq_cd * cd)1885 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1886 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1887 {
1888 	struct ice_aq_desc desc;
1889 	int status;
1890 
1891 	if (opc != ice_aqc_opc_add_sw_rules &&
1892 	    opc != ice_aqc_opc_update_sw_rules &&
1893 	    opc != ice_aqc_opc_remove_sw_rules)
1894 		return -EINVAL;
1895 
1896 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1897 
1898 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1899 	desc.params.sw_rules.num_rules_fltr_entry_index =
1900 		cpu_to_le16(num_rules);
1901 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1902 	if (opc != ice_aqc_opc_add_sw_rules &&
1903 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1904 		status = -ENOENT;
1905 
1906 	return status;
1907 }
1908 
/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add recipe (0x0290). The recipe buffer is sent to firmware as the
 * command data.
 */
static int
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	/* the recipe list buffer is read by firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}
1937 
/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get recipe (0x0292).
 *
 * On input, *num_recipes should equal the number of entries in
 * s_recipe_list. On output, *num_recipes will equal the number of entries
 * returned in s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
static int
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;
	int status;

	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* written back regardless of status; caller checks status first */
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}
1981 
/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current
 * recipe contents based on the recipe's ID. Then modify the field vector
 * index and mask if it's valid at the lkup_idx. Finally, use the add recipe
 * AQ to update the pre-existing recipe with the modifications.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the status of
 * the get/add recipe AQ commands.
 */
int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	int status;

	/* ice_aq_get_recipe requires room for ICE_MAX_NUM_RECIPES entries */
	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
	if (!rcp_list)
		return -ENOMEM;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	kfree(rcp_list);
	return status;
}
2039 
/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 *
 * Recipe to profile association (0x0291).
 */
static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
2065 
2066 /**
2067  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2068  * @hw: pointer to the HW struct
2069  * @profile_id: package profile ID to associate the recipe with
2070  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2071  * @cd: pointer to command details structure or NULL
2072  * Associate profile ID with given recipe (0x0293)
2073  */
2074 static int
ice_aq_get_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u8 * r_bitmap,struct ice_sq_cd * cd)2075 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2076 			     struct ice_sq_cd *cd)
2077 {
2078 	struct ice_aqc_recipe_to_profile *cmd;
2079 	struct ice_aq_desc desc;
2080 	int status;
2081 
2082 	cmd = &desc.params.recipe_to_profile;
2083 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2084 	cmd->profile_id = cpu_to_le16(profile_id);
2085 
2086 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2087 	if (!status)
2088 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2089 
2090 	return status;
2091 }
2092 
2093 /**
2094  * ice_alloc_recipe - add recipe resource
2095  * @hw: pointer to the hardware structure
2096  * @rid: recipe ID returned as response to AQ call
2097  */
ice_alloc_recipe(struct ice_hw * hw,u16 * rid)2098 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2099 {
2100 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2101 	u16 buf_len;
2102 	int status;
2103 
2104 	buf_len = struct_size(sw_buf, elem, 1);
2105 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
2106 	if (!sw_buf)
2107 		return -ENOMEM;
2108 
2109 	sw_buf->num_elems = cpu_to_le16(1);
2110 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2111 					ICE_AQC_RES_TYPE_S) |
2112 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2113 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2114 				       ice_aqc_opc_alloc_res, NULL);
2115 	if (!status)
2116 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2117 	kfree(sw_buf);
2118 
2119 	return status;
2120 }
2121 
2122 /**
2123  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2124  * @hw: pointer to hardware structure
2125  *
2126  * This function is used to populate recipe_to_profile matrix where index to
2127  * this array is the recipe ID and the element is the mapping of which profiles
2128  * is this recipe mapped to.
2129  */
ice_get_recp_to_prof_map(struct ice_hw * hw)2130 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2131 {
2132 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2133 	u16 i;
2134 
2135 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2136 		u16 j;
2137 
2138 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2139 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2140 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2141 			continue;
2142 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2143 			    ICE_MAX_NUM_RECIPES);
2144 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2145 			set_bit(i, recipe_to_profile[j]);
2146 	}
2147 }
2148 
2149 /**
2150  * ice_collect_result_idx - copy result index values
2151  * @buf: buffer that contains the result index
2152  * @recp: the recipe struct to copy data into
2153  */
2154 static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem * buf,struct ice_sw_recipe * recp)2155 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2156 		       struct ice_sw_recipe *recp)
2157 {
2158 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2159 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2160 			recp->res_idxs);
2161 }
2162 
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the status of the
 * get-recipe AQ call (non-zero when the recipe does not exist in firmware).
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* num_recps is in/out: FW reports how many entries it filled in */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		/* The rid field carries the "is root" flag in a high bit */
		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* slot 0 of lkup_indx/mask is skipped here; only slots
		 * 1..ICE_NUM_WORDS_RECIPE are treated as FV lookups
		 */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			/* Translate the FV index into protocol ID + offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* Keep a copy of the raw FW entries for later re-programming */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}
2320 
2321 /* ice_init_port_info - Initialize port_info with switch configuration data
2322  * @pi: pointer to port_info
2323  * @vsi_port_num: VSI number or port number
2324  * @type: Type of switch element (port or VSI)
2325  * @swid: switch ID of the switch the element is attached to
2326  * @pf_vf_num: PF or VF number
2327  * @is_vf: true if the element is a VF, false otherwise
2328  */
2329 static void
ice_init_port_info(struct ice_port_info * pi,u16 vsi_port_num,u8 type,u16 swid,u16 pf_vf_num,bool is_vf)2330 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2331 		   u16 swid, u16 pf_vf_num, bool is_vf)
2332 {
2333 	switch (type) {
2334 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2335 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2336 		pi->sw_id = swid;
2337 		pi->pf_vf_num = pf_vf_num;
2338 		pi->is_vf = is_vf;
2339 		break;
2340 	default:
2341 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2342 		break;
2343 	}
2344 }
2345 
2346 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2347  * @hw: pointer to the hardware structure
2348  */
ice_get_initial_sw_cfg(struct ice_hw * hw)2349 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2350 {
2351 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2352 	u16 req_desc = 0;
2353 	u16 num_elems;
2354 	int status;
2355 	u16 i;
2356 
2357 	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2358 	if (!rbuf)
2359 		return -ENOMEM;
2360 
2361 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2362 	 * to get all the switch configuration information. The need
2363 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2364 	 * writing a non-zero value in req_desc
2365 	 */
2366 	do {
2367 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2368 
2369 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2370 					   &req_desc, &num_elems, NULL);
2371 
2372 		if (status)
2373 			break;
2374 
2375 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2376 			u16 pf_vf_num, swid, vsi_port_num;
2377 			bool is_vf = false;
2378 			u8 res_type;
2379 
2380 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2381 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2382 
2383 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2384 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2385 
2386 			swid = le16_to_cpu(ele->swid);
2387 
2388 			if (le16_to_cpu(ele->pf_vf_num) &
2389 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2390 				is_vf = true;
2391 
2392 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2393 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2394 
2395 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2396 				/* FW VSI is not needed. Just continue. */
2397 				continue;
2398 			}
2399 
2400 			ice_init_port_info(hw->port_info, vsi_port_num,
2401 					   res_type, swid, pf_vf_num, is_vf);
2402 		}
2403 	} while (req_desc && !status);
2404 
2405 	kfree(rbuf);
2406 	return status;
2407 }
2408 
2409 /**
2410  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2411  * @hw: pointer to the hardware structure
2412  * @fi: filter info structure to fill/update
2413  *
2414  * This helper function populates the lb_en and lan_en elements of the provided
2415  * ice_fltr_info struct using the switch's type and characteristics of the
2416  * switch rule being configured.
2417  */
ice_fill_sw_info(struct ice_hw * hw,struct ice_fltr_info * fi)2418 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2419 {
2420 	fi->lb_en = false;
2421 	fi->lan_en = false;
2422 	if ((fi->flag & ICE_FLTR_TX) &&
2423 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2424 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2425 	     fi->fltr_act == ICE_FWD_TO_Q ||
2426 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2427 		/* Setting LB for prune actions will result in replicated
2428 		 * packets to the internal switch that will be dropped.
2429 		 */
2430 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2431 			fi->lb_en = true;
2432 
2433 		/* Set lan_en to TRUE if
2434 		 * 1. The switch is a VEB AND
2435 		 * 2
2436 		 * 2.1 The lookup is a directional lookup like ethertype,
2437 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2438 		 * and default-port OR
2439 		 * 2.2 The lookup is VLAN, OR
2440 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2441 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2442 		 *
2443 		 * OR
2444 		 *
2445 		 * The switch is a VEPA.
2446 		 *
2447 		 * In all other cases, the LAN enable has to be set to false.
2448 		 */
2449 		if (hw->evb_veb) {
2450 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2451 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2452 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2453 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2454 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2455 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2456 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2457 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2458 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2459 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2460 				fi->lan_en = true;
2461 		} else {
2462 			fi->lan_en = true;
2463 		}
2464 	}
2465 }
2466 
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the dummy Ethernet header and the action word for the rule. For
 * ice_aqc_opc_remove_sw_rules only the rule index is populated; for update
 * the header length field is left untouched.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_sw_rule_lkup_rx_tx *s_rule,
		 enum ice_adminq_opc opc)
{
	/* a value above ICE_MAX_VLAN_ID means "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* Removal only needs the rule index; no header or action required */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
		s_rule->hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->hdr_data;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* Build the action word from the forwarding action type */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is expressed as a power of two */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave the rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Lookup type decides which dummy-header fields get patched below */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->src = cpu_to_le16(f_info->src);
	s_rule->act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* A valid VLAN ID was captured above: program TCI and TPID */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
2604 
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Return: 0 on success, -EINVAL for non-MAC lookups, -ENOMEM on allocation
 * failure, or the status of the sw-rules AQ call.
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_sw_rule_lkup_rx_tx *rx_tx;
	struct ice_sw_rule_lg_act *lg_act;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule immediately follows the large action in the buffer */
	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->index = cpu_to_le16(l_id);
	lg_act->size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
				  ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the marker in the management entry on success */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
2707 
2708 /**
2709  * ice_create_vsi_list_map
2710  * @hw: pointer to the hardware structure
2711  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2712  * @num_vsi: number of VSI handles in the array
2713  * @vsi_list_id: VSI list ID generated as part of allocate resource
2714  *
2715  * Helper function to create a new entry of VSI list ID to VSI mapping
2716  * using the given VSI list ID
2717  */
2718 static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id)2719 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2720 			u16 vsi_list_id)
2721 {
2722 	struct ice_switch_info *sw = hw->switch_info;
2723 	struct ice_vsi_list_map_info *v_map;
2724 	int i;
2725 
2726 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2727 	if (!v_map)
2728 		return NULL;
2729 
2730 	v_map->vsi_list_id = vsi_list_id;
2731 	v_map->ref_cnt = 1;
2732 	for (i = 0; i < num_vsi; i++)
2733 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2734 
2735 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2736 	return v_map;
2737 }
2738 
2739 /**
2740  * ice_update_vsi_list_rule
2741  * @hw: pointer to the hardware structure
2742  * @vsi_handle_arr: array of VSI handles to form a VSI list
2743  * @num_vsi: number of VSI handles in the array
2744  * @vsi_list_id: VSI list ID generated as part of allocate resource
2745  * @remove: Boolean value to indicate if this is a remove action
2746  * @opc: switch rules population command type - pass in the command opcode
2747  * @lkup_type: lookup type of the filter
2748  *
2749  * Call AQ command to add a new switch rule or update existing switch rule
2750  * using the given VSI list ID
2751  */
2752 static int
ice_update_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id,bool remove,enum ice_adminq_opc opc,enum ice_sw_lkup_type lkup_type)2753 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2754 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2755 			 enum ice_sw_lkup_type lkup_type)
2756 {
2757 	struct ice_sw_rule_vsi_list *s_rule;
2758 	u16 s_rule_size;
2759 	u16 rule_type;
2760 	int status;
2761 	int i;
2762 
2763 	if (!num_vsi)
2764 		return -EINVAL;
2765 
2766 	if (lkup_type == ICE_SW_LKUP_MAC ||
2767 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2768 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2769 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2770 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2771 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2772 	    lkup_type == ICE_SW_LKUP_DFLT)
2773 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2774 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2775 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2776 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2777 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2778 	else
2779 		return -EINVAL;
2780 
2781 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2782 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2783 	if (!s_rule)
2784 		return -ENOMEM;
2785 	for (i = 0; i < num_vsi; i++) {
2786 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2787 			status = -EINVAL;
2788 			goto exit;
2789 		}
2790 		/* AQ call requires hw_vsi_id(s) */
2791 		s_rule->vsi[i] =
2792 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2793 	}
2794 
2795 	s_rule->hdr.type = cpu_to_le16(rule_type);
2796 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2797 	s_rule->index = cpu_to_le16(vsi_list_id);
2798 
2799 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2800 
2801 exit:
2802 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2803 	return status;
2804 }
2805 
2806 /**
2807  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2808  * @hw: pointer to the HW struct
2809  * @vsi_handle_arr: array of VSI handles to form a VSI list
2810  * @num_vsi: number of VSI handles in the array
2811  * @vsi_list_id: stores the ID of the VSI list to be created
2812  * @lkup_type: switch rule filter's lookup type
2813  */
2814 static int
ice_create_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type)2815 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2816 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2817 {
2818 	int status;
2819 
2820 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2821 					    ice_aqc_opc_alloc_res);
2822 	if (status)
2823 		return status;
2824 
2825 	/* Update the newly created VSI list to include the specified VSIs */
2826 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2827 					*vsi_list_id, false,
2828 					ice_aqc_opc_add_sw_rules, lkup_type);
2829 }
2830 
2831 /**
2832  * ice_create_pkt_fwd_rule
2833  * @hw: pointer to the hardware structure
2834  * @f_entry: entry containing packet forwarding information
2835  *
2836  * Create switch rule with given filter information and add an entry
2837  * to the corresponding filter management list to track this switch rule
2838  * and VSI mapping
2839  */
2840 static int
ice_create_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_list_entry * f_entry)2841 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2842 			struct ice_fltr_list_entry *f_entry)
2843 {
2844 	struct ice_fltr_mgmt_list_entry *fm_entry;
2845 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2846 	enum ice_sw_lkup_type l_type;
2847 	struct ice_sw_recipe *recp;
2848 	int status;
2849 
2850 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2851 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2852 			      GFP_KERNEL);
2853 	if (!s_rule)
2854 		return -ENOMEM;
2855 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2856 				GFP_KERNEL);
2857 	if (!fm_entry) {
2858 		status = -ENOMEM;
2859 		goto ice_create_pkt_fwd_rule_exit;
2860 	}
2861 
2862 	fm_entry->fltr_info = f_entry->fltr_info;
2863 
2864 	/* Initialize all the fields for the management entry */
2865 	fm_entry->vsi_count = 1;
2866 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2867 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2868 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2869 
2870 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2871 			 ice_aqc_opc_add_sw_rules);
2872 
2873 	status = ice_aq_sw_rules(hw, s_rule,
2874 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2875 				 ice_aqc_opc_add_sw_rules, NULL);
2876 	if (status) {
2877 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2878 		goto ice_create_pkt_fwd_rule_exit;
2879 	}
2880 
2881 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2882 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2883 
2884 	/* The book keeping entries will get removed when base driver
2885 	 * calls remove filter AQ command
2886 	 */
2887 	l_type = fm_entry->fltr_info.lkup_type;
2888 	recp = &hw->switch_info->recp_list[l_type];
2889 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2890 
2891 ice_create_pkt_fwd_rule_exit:
2892 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2893 	return status;
2894 }
2895 
2896 /**
2897  * ice_update_pkt_fwd_rule
2898  * @hw: pointer to the hardware structure
2899  * @f_info: filter information for switch rule
2900  *
2901  * Call AQ command to update a previously created switch rule with a
2902  * VSI list ID
2903  */
2904 static int
ice_update_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_info * f_info)2905 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2906 {
2907 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2908 	int status;
2909 
2910 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2911 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2912 			      GFP_KERNEL);
2913 	if (!s_rule)
2914 		return -ENOMEM;
2915 
2916 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2917 
2918 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2919 
2920 	/* Update switch rule with new rule set to forward VSI list */
2921 	status = ice_aq_sw_rules(hw, s_rule,
2922 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2923 				 ice_aqc_opc_update_sw_rules, NULL);
2924 
2925 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2926 	return status;
2927 }
2928 
2929 /**
2930  * ice_update_sw_rule_bridge_mode
2931  * @hw: pointer to the HW struct
2932  *
2933  * Updates unicast switch filter rules based on VEB/VEPA mode
2934  */
ice_update_sw_rule_bridge_mode(struct ice_hw * hw)2935 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2936 {
2937 	struct ice_switch_info *sw = hw->switch_info;
2938 	struct ice_fltr_mgmt_list_entry *fm_entry;
2939 	struct list_head *rule_head;
2940 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2941 	int status = 0;
2942 
2943 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2944 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2945 
2946 	mutex_lock(rule_lock);
2947 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2948 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2949 		u8 *addr = fi->l_data.mac.mac_addr;
2950 
2951 		/* Update unicast Tx rules to reflect the selected
2952 		 * VEB/VEPA mode
2953 		 */
2954 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2955 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2956 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2957 		     fi->fltr_act == ICE_FWD_TO_Q ||
2958 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2959 			status = ice_update_pkt_fwd_rule(hw, fi);
2960 			if (status)
2961 				break;
2962 		}
2963 	}
2964 
2965 	mutex_unlock(rule_lock);
2966 
2967 	return status;
2968 }
2969 
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * Return: 0 on success, -EOPNOTSUPP for queue/queue-group actions,
 * -EEXIST if the VSI is already covered, or a negative error code.
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* Rules forwarding to a queue or queue group cannot be merged into
	 * a VSI list, regardless of which side of the merge they are on.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Mirror the new action/target into the book-keeping entry */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a list must already exist */
		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	/* Only count the VSI if HW accepted the add/update */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3086 
3087 /**
3088  * ice_find_rule_entry - Search a rule entry
3089  * @hw: pointer to the hardware structure
3090  * @recp_id: lookup type for which the specified rule needs to be searched
3091  * @f_info: rule information
3092  *
3093  * Helper function to search for a given rule entry
3094  * Returns pointer to entry storing the rule if found
3095  */
3096 static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw * hw,u8 recp_id,struct ice_fltr_info * f_info)3097 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3098 {
3099 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3100 	struct ice_switch_info *sw = hw->switch_info;
3101 	struct list_head *list_head;
3102 
3103 	list_head = &sw->recp_list[recp_id].filt_rules;
3104 	list_for_each_entry(list_itr, list_head, list_entry) {
3105 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3106 			    sizeof(f_info->l_data)) &&
3107 		    f_info->flag == list_itr->fltr_info.flag) {
3108 			ret = list_itr;
3109 			break;
3110 		}
3111 	}
3112 	return ret;
3113 }
3114 
3115 /**
3116  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3117  * @hw: pointer to the hardware structure
3118  * @recp_id: lookup type for which VSI lists needs to be searched
3119  * @vsi_handle: VSI handle to be found in VSI list
3120  * @vsi_list_id: VSI list ID found containing vsi_handle
3121  *
3122  * Helper function to search a VSI list with single entry containing given VSI
3123  * handle element. This can be extended further to search VSI list with more
3124  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3125  */
3126 static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw * hw,u8 recp_id,u16 vsi_handle,u16 * vsi_list_id)3127 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3128 			u16 *vsi_list_id)
3129 {
3130 	struct ice_vsi_list_map_info *map_info = NULL;
3131 	struct ice_switch_info *sw = hw->switch_info;
3132 	struct ice_fltr_mgmt_list_entry *list_itr;
3133 	struct list_head *list_head;
3134 
3135 	list_head = &sw->recp_list[recp_id].filt_rules;
3136 	list_for_each_entry(list_itr, list_head, list_entry) {
3137 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3138 			map_info = list_itr->vsi_list_info;
3139 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3140 				*vsi_list_id = map_info->vsi_list_id;
3141 				return map_info;
3142 			}
3143 		}
3144 	}
3145 	return NULL;
3146 }
3147 
3148 /**
3149  * ice_add_rule_internal - add rule for a given lookup type
3150  * @hw: pointer to the hardware structure
3151  * @recp_id: lookup type (recipe ID) for which rule has to be added
3152  * @f_entry: structure containing MAC forwarding information
3153  *
3154  * Adds or updates the rule lists for a given recipe
3155  */
3156 static int
ice_add_rule_internal(struct ice_hw * hw,u8 recp_id,struct ice_fltr_list_entry * f_entry)3157 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3158 		      struct ice_fltr_list_entry *f_entry)
3159 {
3160 	struct ice_switch_info *sw = hw->switch_info;
3161 	struct ice_fltr_info *new_fltr, *cur_fltr;
3162 	struct ice_fltr_mgmt_list_entry *m_entry;
3163 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3164 	int status = 0;
3165 
3166 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3167 		return -EINVAL;
3168 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3169 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3170 
3171 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3172 
3173 	mutex_lock(rule_lock);
3174 	new_fltr = &f_entry->fltr_info;
3175 	if (new_fltr->flag & ICE_FLTR_RX)
3176 		new_fltr->src = hw->port_info->lport;
3177 	else if (new_fltr->flag & ICE_FLTR_TX)
3178 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3179 
3180 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3181 	if (!m_entry) {
3182 		mutex_unlock(rule_lock);
3183 		return ice_create_pkt_fwd_rule(hw, f_entry);
3184 	}
3185 
3186 	cur_fltr = &m_entry->fltr_info;
3187 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3188 	mutex_unlock(rule_lock);
3189 
3190 	return status;
3191 }
3192 
3193 /**
3194  * ice_remove_vsi_list_rule
3195  * @hw: pointer to the hardware structure
3196  * @vsi_list_id: VSI list ID generated as part of allocate resource
3197  * @lkup_type: switch rule filter lookup type
3198  *
3199  * The VSI list should be emptied before this function is called to remove the
3200  * VSI list.
3201  */
3202 static int
ice_remove_vsi_list_rule(struct ice_hw * hw,u16 vsi_list_id,enum ice_sw_lkup_type lkup_type)3203 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3204 			 enum ice_sw_lkup_type lkup_type)
3205 {
3206 	struct ice_sw_rule_vsi_list *s_rule;
3207 	u16 s_rule_size;
3208 	int status;
3209 
3210 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3211 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3212 	if (!s_rule)
3213 		return -ENOMEM;
3214 
3215 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3216 	s_rule->index = cpu_to_le16(vsi_list_id);
3217 
3218 	/* Free the vsi_list resource that we allocated. It is assumed that the
3219 	 * list is empty at this point.
3220 	 */
3221 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3222 					    ice_aqc_opc_free_res);
3223 
3224 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3225 	return status;
3226 }
3227 
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list behind @fm_list. If only one VSI
 * remains afterwards (non-VLAN rules), the rule is demoted back to a plain
 * FWD_TO_VSI rule and the now-unneeded VSI list is freed. For VLAN rules the
 * list is only freed once it is completely empty.
 *
 * NOTE(review): fm_list->vsi_list_info is dereferenced without a NULL check;
 * callers must guarantee it is set whenever fltr_act == ICE_FWD_TO_VSI_LIST —
 * TODO confirm at call sites.
 *
 * Return: 0 on success or a negative error code.
 */
static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status = 0;

	/* Only meaningful for rules that actually use a VSI list */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Tell firmware to drop this VSI from the list (last arg true =
	 * remove), then mirror the change in the local bitmap/count.
	 */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		/* One VSI left: convert the rule back to direct FWD_TO_VSI
		 * so the VSI list can be released below.
		 */
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		/* Commit the demoted filter info to the book-keeping entry */
		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3315 
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Looks up the book-keeping entry matching @f_entry under the recipe's rule
 * lock, unwinds any VSI-list sharing, and removes the switch rule from
 * firmware (and the local list) when it is no longer referenced.
 *
 * Return: 0 on success, -ENOENT if no matching rule exists, or another
 * negative error code.
 */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Direct-forwarding rule: no list book keeping, just remove */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* FWD_TO_VSI_LIST without list info is inconsistent state */
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
3406 
3407 /**
3408  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3409  * @hw: pointer to the hardware structure
3410  * @mac: MAC address to be checked (for MAC filter)
3411  * @vsi_handle: check MAC filter for this VSI
3412  */
ice_mac_fltr_exist(struct ice_hw * hw,u8 * mac,u16 vsi_handle)3413 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3414 {
3415 	struct ice_fltr_mgmt_list_entry *entry;
3416 	struct list_head *rule_head;
3417 	struct ice_switch_info *sw;
3418 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3419 	u16 hw_vsi_id;
3420 
3421 	if (!ice_is_vsi_valid(hw, vsi_handle))
3422 		return false;
3423 
3424 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3425 	sw = hw->switch_info;
3426 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3427 	if (!rule_head)
3428 		return false;
3429 
3430 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3431 	mutex_lock(rule_lock);
3432 	list_for_each_entry(entry, rule_head, list_entry) {
3433 		struct ice_fltr_info *f_info = &entry->fltr_info;
3434 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3435 
3436 		if (is_zero_ether_addr(mac_addr))
3437 			continue;
3438 
3439 		if (f_info->flag != ICE_FLTR_TX ||
3440 		    f_info->src_id != ICE_SRC_ID_VSI ||
3441 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3442 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3443 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3444 			continue;
3445 
3446 		if (ether_addr_equal(mac, mac_addr)) {
3447 			mutex_unlock(rule_lock);
3448 			return true;
3449 		}
3450 	}
3451 	mutex_unlock(rule_lock);
3452 	return false;
3453 }
3454 
/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID
 * @vsi_handle: check MAC filter for this VSI
 *
 * Scans the VLAN recipe's rule list (under its lock) for a Tx, VSI-sourced
 * rule matching @vlan_id that forwards to @vsi_handle, either directly
 * (FWD_TO_VSI) or via a VSI list containing the handle.
 *
 * Return: true if such a filter exists, false otherwise.
 */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	/* VLAN IDs are only 12 bits wide */
	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 */
			/* NOTE(review): membership is only checked when
			 * vsi_count == 1; entries with larger lists fall
			 * through to the VLAN ID compare — confirm intended.
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}
3525 
3526 /**
3527  * ice_add_mac - Add a MAC address based filter rule
3528  * @hw: pointer to the hardware structure
3529  * @m_list: list of MAC addresses and forwarding information
3530  */
ice_add_mac(struct ice_hw * hw,struct list_head * m_list)3531 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3532 {
3533 	struct ice_fltr_list_entry *m_list_itr;
3534 	int status = 0;
3535 
3536 	if (!m_list || !hw)
3537 		return -EINVAL;
3538 
3539 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3540 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3541 		u16 vsi_handle;
3542 		u16 hw_vsi_id;
3543 
3544 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3545 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3546 		if (!ice_is_vsi_valid(hw, vsi_handle))
3547 			return -EINVAL;
3548 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3549 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3550 		/* update the src in case it is VSI num */
3551 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3552 			return -EINVAL;
3553 		m_list_itr->fltr_info.src = hw_vsi_id;
3554 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3555 		    is_zero_ether_addr(add))
3556 			return -EINVAL;
3557 
3558 		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3559 							   m_list_itr);
3560 		if (m_list_itr->status)
3561 			return m_list_itr->status;
3562 	}
3563 
3564 	return status;
3565 }
3566 
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Programs a single VLAN filter. All VLAN rules forward through VSI lists:
 * a brand new rule either reuses an existing single-VSI list containing the
 * target VSI or creates a fresh one; an existing rule gets its list updated
 * in place (ref_cnt == 1) or re-pointed at a newly built two-VSI list
 * (shared list, ref_cnt > 1).
 *
 * Return: 0 on success, -EEXIST if the VSI is already covered, or another
 * negative error code.
 */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return -EINVAL;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		/* No rule exists yet for this VLAN key */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* Re-find the book-keeping entry the helper added so
			 * its list mapping can be filled in below.
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = -ENOENT;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = -EIO;
			goto exit;
		}

		/* The shared list holds exactly one VSI at this point */
		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = -EEXIST;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}
3719 
3720 /**
3721  * ice_add_vlan - Add VLAN based filter rule
3722  * @hw: pointer to the hardware structure
3723  * @v_list: list of VLAN entries and forwarding information
3724  */
ice_add_vlan(struct ice_hw * hw,struct list_head * v_list)3725 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3726 {
3727 	struct ice_fltr_list_entry *v_list_itr;
3728 
3729 	if (!v_list || !hw)
3730 		return -EINVAL;
3731 
3732 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3733 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3734 			return -EINVAL;
3735 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3736 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3737 		if (v_list_itr->status)
3738 			return v_list_itr->status;
3739 	}
3740 	return 0;
3741 }
3742 
3743 /**
3744  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3745  * @hw: pointer to the hardware structure
3746  * @em_list: list of ether type MAC filter, MAC is optional
3747  *
3748  * This function requires the caller to populate the entries in
3749  * the filter list with the necessary fields (including flags to
3750  * indicate Tx or Rx rules).
3751  */
ice_add_eth_mac(struct ice_hw * hw,struct list_head * em_list)3752 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3753 {
3754 	struct ice_fltr_list_entry *em_list_itr;
3755 
3756 	if (!em_list || !hw)
3757 		return -EINVAL;
3758 
3759 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3760 		enum ice_sw_lkup_type l_type =
3761 			em_list_itr->fltr_info.lkup_type;
3762 
3763 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3764 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3765 			return -EINVAL;
3766 
3767 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3768 							    em_list_itr);
3769 		if (em_list_itr->status)
3770 			return em_list_itr->status;
3771 	}
3772 	return 0;
3773 }
3774 
3775 /**
3776  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3777  * @hw: pointer to the hardware structure
3778  * @em_list: list of ethertype or ethertype MAC entries
3779  */
ice_remove_eth_mac(struct ice_hw * hw,struct list_head * em_list)3780 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3781 {
3782 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3783 
3784 	if (!em_list || !hw)
3785 		return -EINVAL;
3786 
3787 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3788 		enum ice_sw_lkup_type l_type =
3789 			em_list_itr->fltr_info.lkup_type;
3790 
3791 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3792 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3793 			return -EINVAL;
3794 
3795 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3796 							       em_list_itr);
3797 		if (em_list_itr->status)
3798 			return em_list_itr->status;
3799 	}
3800 	return 0;
3801 }
3802 
3803 /**
3804  * ice_rem_sw_rule_info
3805  * @hw: pointer to the hardware structure
3806  * @rule_head: pointer to the switch list structure that we want to delete
3807  */
3808 static void
ice_rem_sw_rule_info(struct ice_hw * hw,struct list_head * rule_head)3809 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3810 {
3811 	if (!list_empty(rule_head)) {
3812 		struct ice_fltr_mgmt_list_entry *entry;
3813 		struct ice_fltr_mgmt_list_entry *tmp;
3814 
3815 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3816 			list_del(&entry->list_entry);
3817 			devm_kfree(ice_hw_to_dev(hw), entry);
3818 		}
3819 	}
3820 }
3821 
3822 /**
3823  * ice_rem_adv_rule_info
3824  * @hw: pointer to the hardware structure
3825  * @rule_head: pointer to the switch list structure that we want to delete
3826  */
3827 static void
ice_rem_adv_rule_info(struct ice_hw * hw,struct list_head * rule_head)3828 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3829 {
3830 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3831 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3832 
3833 	if (list_empty(rule_head))
3834 		return;
3835 
3836 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3837 		list_del(&lst_itr->list_entry);
3838 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3839 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3840 	}
3841 }
3842 
3843 /**
3844  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3845  * @pi: pointer to the port_info structure
3846  * @vsi_handle: VSI handle to set as default
3847  * @set: true to add the above mentioned switch rule, false to remove it
3848  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3849  *
3850  * add filter rule to set/unset given VSI as default VSI for the switch
3851  * (represented by swid)
3852  */
3853 int
ice_cfg_dflt_vsi(struct ice_port_info * pi,u16 vsi_handle,bool set,u8 direction)3854 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3855 		 u8 direction)
3856 {
3857 	struct ice_fltr_list_entry f_list_entry;
3858 	struct ice_fltr_info f_info;
3859 	struct ice_hw *hw = pi->hw;
3860 	u16 hw_vsi_id;
3861 	int status;
3862 
3863 	if (!ice_is_vsi_valid(hw, vsi_handle))
3864 		return -EINVAL;
3865 
3866 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3867 
3868 	memset(&f_info, 0, sizeof(f_info));
3869 
3870 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3871 	f_info.flag = direction;
3872 	f_info.fltr_act = ICE_FWD_TO_VSI;
3873 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3874 	f_info.vsi_handle = vsi_handle;
3875 
3876 	if (f_info.flag & ICE_FLTR_RX) {
3877 		f_info.src = hw->port_info->lport;
3878 		f_info.src_id = ICE_SRC_ID_LPORT;
3879 	} else if (f_info.flag & ICE_FLTR_TX) {
3880 		f_info.src_id = ICE_SRC_ID_VSI;
3881 		f_info.src = hw_vsi_id;
3882 	}
3883 	f_list_entry.fltr_info = f_info;
3884 
3885 	if (set)
3886 		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3887 					       &f_list_entry);
3888 	else
3889 		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3890 						  &f_list_entry);
3891 
3892 	return status;
3893 }
3894 
3895 /**
3896  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3897  * @fm_entry: filter entry to inspect
3898  * @vsi_handle: VSI handle to compare with filter info
3899  */
3900 static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry * fm_entry,u16 vsi_handle)3901 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3902 {
3903 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3904 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3905 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3906 		 fm_entry->vsi_list_info &&
3907 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3908 }
3909 
3910 /**
3911  * ice_check_if_dflt_vsi - check if VSI is default VSI
3912  * @pi: pointer to the port_info structure
3913  * @vsi_handle: vsi handle to check for in filter list
3914  * @rule_exists: indicates if there are any VSI's in the rule list
3915  *
3916  * checks if the VSI is in a default VSI list, and also indicates
3917  * if the default VSI list is empty
3918  */
3919 bool
ice_check_if_dflt_vsi(struct ice_port_info * pi,u16 vsi_handle,bool * rule_exists)3920 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3921 		      bool *rule_exists)
3922 {
3923 	struct ice_fltr_mgmt_list_entry *fm_entry;
3924 	struct ice_sw_recipe *recp_list;
3925 	struct list_head *rule_head;
3926 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3927 	bool ret = false;
3928 
3929 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3930 	rule_lock = &recp_list->filt_rule_lock;
3931 	rule_head = &recp_list->filt_rules;
3932 
3933 	mutex_lock(rule_lock);
3934 
3935 	if (rule_exists && !list_empty(rule_head))
3936 		*rule_exists = true;
3937 
3938 	list_for_each_entry(fm_entry, rule_head, list_entry) {
3939 		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3940 			ret = true;
3941 			break;
3942 		}
3943 	}
3944 
3945 	mutex_unlock(rule_lock);
3946 
3947 	return ret;
3948 }
3949 
3950 /**
3951  * ice_remove_mac - remove a MAC address based filter rule
3952  * @hw: pointer to the hardware structure
3953  * @m_list: list of MAC addresses and forwarding information
3954  *
3955  * This function removes either a MAC filter rule or a specific VSI from a
3956  * VSI list for a multicast MAC address.
3957  *
3958  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3959  * be aware that this call will only work if all the entries passed into m_list
3960  * were added previously. It will not attempt to do a partial remove of entries
3961  * that were found.
3962  */
ice_remove_mac(struct ice_hw * hw,struct list_head * m_list)3963 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3964 {
3965 	struct ice_fltr_list_entry *list_itr, *tmp;
3966 
3967 	if (!m_list)
3968 		return -EINVAL;
3969 
3970 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3971 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3972 		u16 vsi_handle;
3973 
3974 		if (l_type != ICE_SW_LKUP_MAC)
3975 			return -EINVAL;
3976 
3977 		vsi_handle = list_itr->fltr_info.vsi_handle;
3978 		if (!ice_is_vsi_valid(hw, vsi_handle))
3979 			return -EINVAL;
3980 
3981 		list_itr->fltr_info.fwd_id.hw_vsi_id =
3982 					ice_get_hw_vsi_num(hw, vsi_handle);
3983 
3984 		list_itr->status = ice_remove_rule_internal(hw,
3985 							    ICE_SW_LKUP_MAC,
3986 							    list_itr);
3987 		if (list_itr->status)
3988 			return list_itr->status;
3989 	}
3990 	return 0;
3991 }
3992 
3993 /**
3994  * ice_remove_vlan - Remove VLAN based filter rule
3995  * @hw: pointer to the hardware structure
3996  * @v_list: list of VLAN entries and forwarding information
3997  */
ice_remove_vlan(struct ice_hw * hw,struct list_head * v_list)3998 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3999 {
4000 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4001 
4002 	if (!v_list || !hw)
4003 		return -EINVAL;
4004 
4005 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4006 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4007 
4008 		if (l_type != ICE_SW_LKUP_VLAN)
4009 			return -EINVAL;
4010 		v_list_itr->status = ice_remove_rule_internal(hw,
4011 							      ICE_SW_LKUP_VLAN,
4012 							      v_list_itr);
4013 		if (v_list_itr->status)
4014 			return v_list_itr->status;
4015 	}
4016 	return 0;
4017 }
4018 
4019 /**
4020  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4021  * @hw: pointer to the hardware structure
4022  * @vsi_handle: VSI handle to remove filters from
4023  * @vsi_list_head: pointer to the list to add entry to
4024  * @fi: pointer to fltr_info of filter entry to copy & add
4025  *
4026  * Helper function, used when creating a list of filters to remove from
4027  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4028  * original filter entry, with the exception of fltr_info.fltr_act and
4029  * fltr_info.fwd_id fields. These are set such that later logic can
4030  * extract which VSI to remove the fltr from, and pass on that information.
4031  */
4032 static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct list_head * vsi_list_head,struct ice_fltr_info * fi)4033 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4034 			       struct list_head *vsi_list_head,
4035 			       struct ice_fltr_info *fi)
4036 {
4037 	struct ice_fltr_list_entry *tmp;
4038 
4039 	/* this memory is freed up in the caller function
4040 	 * once filters for this VSI are removed
4041 	 */
4042 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4043 	if (!tmp)
4044 		return -ENOMEM;
4045 
4046 	tmp->fltr_info = *fi;
4047 
4048 	/* Overwrite these fields to indicate which VSI to remove filter from,
4049 	 * so find and remove logic can extract the information from the
4050 	 * list entries. Note that original entries will still have proper
4051 	 * values.
4052 	 */
4053 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4054 	tmp->fltr_info.vsi_handle = vsi_handle;
4055 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4056 
4057 	list_add(&tmp->list_entry, vsi_list_head);
4058 
4059 	return 0;
4060 }
4061 
4062 /**
4063  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4064  * @hw: pointer to the hardware structure
4065  * @vsi_handle: VSI handle to remove filters from
4066  * @lkup_list_head: pointer to the list that has certain lookup type filters
4067  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4068  *
4069  * Locates all filters in lkup_list_head that are used by the given VSI,
4070  * and adds COPIES of those entries to vsi_list_head (intended to be used
4071  * to remove the listed filters).
4072  * Note that this means all entries in vsi_list_head must be explicitly
4073  * deallocated by the caller when done with list.
4074  */
4075 static int
ice_add_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct list_head * lkup_list_head,struct list_head * vsi_list_head)4076 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4077 			 struct list_head *lkup_list_head,
4078 			 struct list_head *vsi_list_head)
4079 {
4080 	struct ice_fltr_mgmt_list_entry *fm_entry;
4081 	int status = 0;
4082 
4083 	/* check to make sure VSI ID is valid and within boundary */
4084 	if (!ice_is_vsi_valid(hw, vsi_handle))
4085 		return -EINVAL;
4086 
4087 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4088 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4089 			continue;
4090 
4091 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4092 							vsi_list_head,
4093 							&fm_entry->fltr_info);
4094 		if (status)
4095 			return status;
4096 	}
4097 	return status;
4098 }
4099 
4100 /**
4101  * ice_determine_promisc_mask
4102  * @fi: filter info to parse
4103  *
4104  * Helper function to determine which ICE_PROMISC_ mask corresponds
4105  * to given filter into.
4106  */
ice_determine_promisc_mask(struct ice_fltr_info * fi)4107 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4108 {
4109 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4110 	u8 *macaddr = fi->l_data.mac.mac_addr;
4111 	bool is_tx_fltr = false;
4112 	u8 promisc_mask = 0;
4113 
4114 	if (fi->flag == ICE_FLTR_TX)
4115 		is_tx_fltr = true;
4116 
4117 	if (is_broadcast_ether_addr(macaddr))
4118 		promisc_mask |= is_tx_fltr ?
4119 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4120 	else if (is_multicast_ether_addr(macaddr))
4121 		promisc_mask |= is_tx_fltr ?
4122 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4123 	else if (is_unicast_ether_addr(macaddr))
4124 		promisc_mask |= is_tx_fltr ?
4125 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4126 	if (vid)
4127 		promisc_mask |= is_tx_fltr ?
4128 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4129 
4130 	return promisc_mask;
4131 }
4132 
4133 /**
4134  * ice_remove_promisc - Remove promisc based filter rules
4135  * @hw: pointer to the hardware structure
4136  * @recp_id: recipe ID for which the rule needs to removed
4137  * @v_list: list of promisc entries
4138  */
4139 static int
ice_remove_promisc(struct ice_hw * hw,u8 recp_id,struct list_head * v_list)4140 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4141 {
4142 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4143 
4144 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4145 		v_list_itr->status =
4146 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4147 		if (v_list_itr->status)
4148 			return v_list_itr->status;
4149 	}
4150 	return 0;
4151 }
4152 
/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Collects copies of all promisc rules used by @vsi_handle that are fully
 * covered by @promisc_mask, then removes those rules. Returns 0 on success
 * or a negative error code.
 */
int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	int status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* VLAN promiscuous rules live under a separate recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Build the remove list under the lock; the actual removal happens
	 * on the private copies after the lock is dropped.
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* for VLAN promisc, only touch rules matching the given VID */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the entry copies made by ice_add_entry_to_vsi_fltr_list */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
4225 
/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Installs one switch rule for each direction/packet-type combination set
 * in @promisc_mask. Returns 0 on success or the error of the first rule
 * addition that failed.
 */
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	bool is_tx_fltr;
	int status = 0;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	/* VLAN promiscuous uses a dedicated recipe that also matches @vid */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* peel off exactly one MAC-class bit per iteration */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
4337 
/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	u16 vlan_id;
	int status;

	/* Copy this VSI's VLAN filters under the lock, then apply the
	 * promisc change per VLAN without holding it.
	 */
	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		/* Avoid enabling or disabling VLAN zero twice when in double
		 * VLAN mode
		 * NOTE(review): this inspects tpid, not vlan_id — presumably
		 * a zero TPID identifies the implicit VLAN-0 entry; confirm.
		 */
		if (ice_is_dvm_ena(hw) &&
		    list_itr->fltr_info.l_data.vlan.tpid == 0)
			continue;

		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		/* -EEXIST (rule already present) is not fatal; keep going */
		if (status && status != -EEXIST)
			break;
	}

free_fltr_list:
	/* free the filter copies made by ice_add_to_vsi_fltr_list */
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}
4395 
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Copies all of this VSI's filters of the given lookup type while holding
 * the rule lock, then removes them via the type-specific helper.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	int status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	/* dispatch to the removal helper for this lookup type; other types
	 * are only logged (removal helpers for them are not wired up here)
	 */
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	/* free the filter copies regardless of the removal outcome */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}
4451 
4452 /**
4453  * ice_remove_vsi_fltr - Remove all filters for a VSI
4454  * @hw: pointer to the hardware structure
4455  * @vsi_handle: VSI handle to remove filters from
4456  */
ice_remove_vsi_fltr(struct ice_hw * hw,u16 vsi_handle)4457 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4458 {
4459 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4460 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4461 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4462 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4463 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4464 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4465 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4466 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4467 }
4468 
4469 /**
4470  * ice_alloc_res_cntr - allocating resource counter
4471  * @hw: pointer to the hardware structure
4472  * @type: type of resource
4473  * @alloc_shared: if set it is shared else dedicated
4474  * @num_items: number of entries requested for FD resource type
4475  * @counter_id: counter index returned by AQ call
4476  */
4477 int
ice_alloc_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 * counter_id)4478 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4479 		   u16 *counter_id)
4480 {
4481 	struct ice_aqc_alloc_free_res_elem *buf;
4482 	u16 buf_len;
4483 	int status;
4484 
4485 	/* Allocate resource */
4486 	buf_len = struct_size(buf, elem, 1);
4487 	buf = kzalloc(buf_len, GFP_KERNEL);
4488 	if (!buf)
4489 		return -ENOMEM;
4490 
4491 	buf->num_elems = cpu_to_le16(num_items);
4492 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4493 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4494 
4495 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4496 				       ice_aqc_opc_alloc_res, NULL);
4497 	if (status)
4498 		goto exit;
4499 
4500 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4501 
4502 exit:
4503 	kfree(buf);
4504 	return status;
4505 }
4506 
4507 /**
4508  * ice_free_res_cntr - free resource counter
4509  * @hw: pointer to the hardware structure
4510  * @type: type of resource
4511  * @alloc_shared: if set it is shared else dedicated
4512  * @num_items: number of entries to be freed for FD resource type
4513  * @counter_id: counter ID resource which needs to be freed
4514  */
4515 int
ice_free_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 counter_id)4516 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4517 		  u16 counter_id)
4518 {
4519 	struct ice_aqc_alloc_free_res_elem *buf;
4520 	u16 buf_len;
4521 	int status;
4522 
4523 	/* Free resource */
4524 	buf_len = struct_size(buf, elem, 1);
4525 	buf = kzalloc(buf_len, GFP_KERNEL);
4526 	if (!buf)
4527 		return -ENOMEM;
4528 
4529 	buf->num_elems = cpu_to_le16(num_items);
4530 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4531 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4532 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4533 
4534 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4535 				       ice_aqc_opc_free_res, NULL);
4536 	if (status)
4537 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4538 
4539 	kfree(buf);
4540 	return status;
4541 }
4542 
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 * Note: the VLAN entries (ICE_VLAN_OFOS/EX/IN) list offset 2 before
 * offset 0.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_ETYPE_IL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_VLAN_EX,          { 2, 0 } },
	{ ICE_VLAN_IN,          { 2, 0 } },
};
4577 
/* Maps software protocol types (ice_protocol_type) to hardware protocol IDs
 * used when programming recipes. Several tunnel headers (VXLAN, GENEVE, GTP)
 * share the outer-UDP hardware protocol ID. Non-const on purpose:
 * ice_change_proto_id_to_dvm() patches the ICE_VLAN_OFOS entry at runtime.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,          ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,          ICE_VLAN_OL_HW },
};
4601 
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 * A recipe matches when it has the same number of valid words, every lookup
 * word (protocol ID, offset, and field mask) appears among the recipe's
 * words, and the tunnel types agree.
 */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
{
	/* ice_get_recp_frm_fw() may clear this after the first FW refresh */
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
4681 
4682 /**
4683  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4684  *
4685  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4686  * supported protocol array record for outer vlan has to be modified to
4687  * reflect the value proper for DVM.
4688  */
ice_change_proto_id_to_dvm(void)4689 void ice_change_proto_id_to_dvm(void)
4690 {
4691 	u8 i;
4692 
4693 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4694 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4695 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4696 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4697 }
4698 
4699 /**
4700  * ice_prot_type_to_id - get protocol ID from protocol type
4701  * @type: protocol type
4702  * @id: pointer to variable that will receive the ID
4703  *
4704  * Returns true if found, false otherwise
4705  */
ice_prot_type_to_id(enum ice_protocol_type type,u8 * id)4706 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4707 {
4708 	u8 i;
4709 
4710 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4711 		if (ice_prot_id_tbl[i].type == type) {
4712 			*id = ice_prot_id_tbl[i].protocol_id;
4713 			return true;
4714 		}
4715 	return false;
4716 }
4717 
4718 /**
4719  * ice_fill_valid_words - count valid words
4720  * @rule: advanced rule with lookup information
4721  * @lkup_exts: byte offset extractions of the words that are valid
4722  *
4723  * calculate valid words in a lookup rule using mask value
4724  */
4725 static u8
ice_fill_valid_words(struct ice_adv_lkup_elem * rule,struct ice_prot_lkup_ext * lkup_exts)4726 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4727 		     struct ice_prot_lkup_ext *lkup_exts)
4728 {
4729 	u8 j, word, prot_id, ret_val;
4730 
4731 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4732 		return 0;
4733 
4734 	word = lkup_exts->n_val_words;
4735 
4736 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4737 		if (((u16 *)&rule->m_u)[j] &&
4738 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4739 			/* No more space to accommodate */
4740 			if (word >= ICE_MAX_CHAIN_WORDS)
4741 				return 0;
4742 			lkup_exts->fv_words[word].off =
4743 				ice_prot_ext[rule->type].offs[j];
4744 			lkup_exts->fv_words[word].prot_id =
4745 				ice_prot_id_tbl[rule->type].protocol_id;
4746 			lkup_exts->field_mask[word] =
4747 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4748 			word++;
4749 		}
4750 
4751 	ret_val = word - lkup_exts->n_val_words;
4752 	lkup_exts->n_val_words = word;
4753 
4754 	return ret_val;
4755 }
4756 
4757 /**
4758  * ice_create_first_fit_recp_def - Create a recipe grouping
4759  * @hw: pointer to the hardware structure
4760  * @lkup_exts: an array of protocol header extractions
4761  * @rg_list: pointer to a list that stores new recipe groups
4762  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4763  *
4764  * Using first fit algorithm, take all the words that are still not done
4765  * and start grouping them in 4-word groups. Each group makes up one
4766  * recipe.
4767  */
4768 static int
ice_create_first_fit_recp_def(struct ice_hw * hw,struct ice_prot_lkup_ext * lkup_exts,struct list_head * rg_list,u8 * recp_cnt)4769 ice_create_first_fit_recp_def(struct ice_hw *hw,
4770 			      struct ice_prot_lkup_ext *lkup_exts,
4771 			      struct list_head *rg_list,
4772 			      u8 *recp_cnt)
4773 {
4774 	struct ice_pref_recipe_group *grp = NULL;
4775 	u8 j;
4776 
4777 	*recp_cnt = 0;
4778 
4779 	/* Walk through every word in the rule to check if it is not done. If so
4780 	 * then this word needs to be part of a new recipe.
4781 	 */
4782 	for (j = 0; j < lkup_exts->n_val_words; j++)
4783 		if (!test_bit(j, lkup_exts->done)) {
4784 			if (!grp ||
4785 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4786 				struct ice_recp_grp_entry *entry;
4787 
4788 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4789 						     sizeof(*entry),
4790 						     GFP_KERNEL);
4791 				if (!entry)
4792 					return -ENOMEM;
4793 				list_add(&entry->l_entry, rg_list);
4794 				grp = &entry->r_group;
4795 				(*recp_cnt)++;
4796 			}
4797 
4798 			grp->pairs[grp->n_val_pairs].prot_id =
4799 				lkup_exts->fv_words[j].prot_id;
4800 			grp->pairs[grp->n_val_pairs].off =
4801 				lkup_exts->fv_words[j].off;
4802 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4803 			grp->n_val_pairs++;
4804 		}
4805 
4806 	return 0;
4807 }
4808 
4809 /**
4810  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4811  * @hw: pointer to the hardware structure
4812  * @fv_list: field vector with the extraction sequence information
4813  * @rg_list: recipe groupings with protocol-offset pairs
4814  *
4815  * Helper function to fill in the field vector indices for protocol-offset
4816  * pairs. These indexes are then ultimately programmed into a recipe.
4817  */
4818 static int
ice_fill_fv_word_index(struct ice_hw * hw,struct list_head * fv_list,struct list_head * rg_list)4819 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4820 		       struct list_head *rg_list)
4821 {
4822 	struct ice_sw_fv_list_entry *fv;
4823 	struct ice_recp_grp_entry *rg;
4824 	struct ice_fv_word *fv_ext;
4825 
4826 	if (list_empty(fv_list))
4827 		return 0;
4828 
4829 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4830 			      list_entry);
4831 	fv_ext = fv->fv_ptr->ew;
4832 
4833 	list_for_each_entry(rg, rg_list, l_entry) {
4834 		u8 i;
4835 
4836 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4837 			struct ice_fv_word *pr;
4838 			bool found = false;
4839 			u16 mask;
4840 			u8 j;
4841 
4842 			pr = &rg->r_group.pairs[i];
4843 			mask = rg->r_group.mask[i];
4844 
4845 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4846 				if (fv_ext[j].prot_id == pr->prot_id &&
4847 				    fv_ext[j].off == pr->off) {
4848 					found = true;
4849 
4850 					/* Store index of field vector */
4851 					rg->fv_idx[i] = j;
4852 					rg->fv_mask[i] = mask;
4853 					break;
4854 				}
4855 
4856 			/* Protocol/offset could not be found, caller gave an
4857 			 * invalid pair
4858 			 */
4859 			if (!found)
4860 				return -EINVAL;
4861 		}
4862 	}
4863 
4864 	return 0;
4865 }
4866 
4867 /**
4868  * ice_find_free_recp_res_idx - find free result indexes for recipe
4869  * @hw: pointer to hardware structure
4870  * @profiles: bitmap of profiles that will be associated with the new recipe
4871  * @free_idx: pointer to variable to receive the free index bitmap
4872  *
4873  * The algorithm used here is:
4874  *	1. When creating a new recipe, create a set P which contains all
4875  *	   Profiles that will be associated with our new recipe
4876  *
4877  *	2. For each Profile p in set P:
4878  *	    a. Add all recipes associated with Profile p into set R
4879  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4880  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4881  *		i. Or just assume they all have the same possible indexes:
4882  *			44, 45, 46, 47
4883  *			i.e., PossibleIndexes = 0x0000F00000000000
4884  *
4885  *	3. For each Recipe r in set R:
4886  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4887  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4888  *
4889  *	FreeIndexes will contain the bits indicating the indexes free for use,
4890  *      then the code needs to update the recipe[r].used_result_idx_bits to
4891  *      indicate which indexes were selected for use by this recipe.
4892  */
4893 static u16
ice_find_free_recp_res_idx(struct ice_hw * hw,const unsigned long * profiles,unsigned long * free_idx)4894 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4895 			   unsigned long *free_idx)
4896 {
4897 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4898 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4899 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4900 	u16 bit;
4901 
4902 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4903 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4904 
4905 	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4906 
4907 	/* For each profile we are going to associate the recipe with, add the
4908 	 * recipes that are associated with that profile. This will give us
4909 	 * the set of recipes that our recipe may collide with. Also, determine
4910 	 * what possible result indexes are usable given this set of profiles.
4911 	 */
4912 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4913 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4914 			  ICE_MAX_NUM_RECIPES);
4915 		bitmap_and(possible_idx, possible_idx,
4916 			   hw->switch_info->prof_res_bm[bit],
4917 			   ICE_MAX_FV_WORDS);
4918 	}
4919 
4920 	/* For each recipe that our new recipe may collide with, determine
4921 	 * which indexes have been used.
4922 	 */
4923 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4924 		bitmap_or(used_idx, used_idx,
4925 			  hw->switch_info->recp_list[bit].res_idxs,
4926 			  ICE_MAX_FV_WORDS);
4927 
4928 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4929 
4930 	/* return number of free indexes */
4931 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4932 }
4933 
4934 /**
4935  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4936  * @hw: pointer to hardware structure
4937  * @rm: recipe management list entry
4938  * @profiles: bitmap of profiles that will be associated.
4939  */
4940 static int
ice_add_sw_recipe(struct ice_hw * hw,struct ice_sw_recipe * rm,unsigned long * profiles)4941 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4942 		  unsigned long *profiles)
4943 {
4944 	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4945 	struct ice_aqc_recipe_data_elem *tmp;
4946 	struct ice_aqc_recipe_data_elem *buf;
4947 	struct ice_recp_grp_entry *entry;
4948 	u16 free_res_idx;
4949 	u16 recipe_count;
4950 	u8 chain_idx;
4951 	u8 recps = 0;
4952 	int status;
4953 
4954 	/* When more than one recipe are required, another recipe is needed to
4955 	 * chain them together. Matching a tunnel metadata ID takes up one of
4956 	 * the match fields in the chaining recipe reducing the number of
4957 	 * chained recipes by one.
4958 	 */
4959 	 /* check number of free result indices */
4960 	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4961 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4962 
4963 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4964 		  free_res_idx, rm->n_grp_count);
4965 
4966 	if (rm->n_grp_count > 1) {
4967 		if (rm->n_grp_count > free_res_idx)
4968 			return -ENOSPC;
4969 
4970 		rm->n_grp_count++;
4971 	}
4972 
4973 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4974 		return -ENOSPC;
4975 
4976 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4977 	if (!tmp)
4978 		return -ENOMEM;
4979 
4980 	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4981 			   GFP_KERNEL);
4982 	if (!buf) {
4983 		status = -ENOMEM;
4984 		goto err_mem;
4985 	}
4986 
4987 	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4988 	recipe_count = ICE_MAX_NUM_RECIPES;
4989 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4990 				   NULL);
4991 	if (status || recipe_count == 0)
4992 		goto err_unroll;
4993 
4994 	/* Allocate the recipe resources, and configure them according to the
4995 	 * match fields from protocol headers and extracted field vectors.
4996 	 */
4997 	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
4998 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
4999 		u8 i;
5000 
5001 		status = ice_alloc_recipe(hw, &entry->rid);
5002 		if (status)
5003 			goto err_unroll;
5004 
5005 		/* Clear the result index of the located recipe, as this will be
5006 		 * updated, if needed, later in the recipe creation process.
5007 		 */
5008 		tmp[0].content.result_indx = 0;
5009 
5010 		buf[recps] = tmp[0];
5011 		buf[recps].recipe_indx = (u8)entry->rid;
5012 		/* if the recipe is a non-root recipe RID should be programmed
5013 		 * as 0 for the rules to be applied correctly.
5014 		 */
5015 		buf[recps].content.rid = 0;
5016 		memset(&buf[recps].content.lkup_indx, 0,
5017 		       sizeof(buf[recps].content.lkup_indx));
5018 
5019 		/* All recipes use look-up index 0 to match switch ID. */
5020 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5021 		buf[recps].content.mask[0] =
5022 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5023 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5024 		 * to be 0
5025 		 */
5026 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5027 			buf[recps].content.lkup_indx[i] = 0x80;
5028 			buf[recps].content.mask[i] = 0;
5029 		}
5030 
5031 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5032 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5033 			buf[recps].content.mask[i + 1] =
5034 				cpu_to_le16(entry->fv_mask[i]);
5035 		}
5036 
5037 		if (rm->n_grp_count > 1) {
5038 			/* Checks to see if there really is a valid result index
5039 			 * that can be used.
5040 			 */
5041 			if (chain_idx >= ICE_MAX_FV_WORDS) {
5042 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
5043 				status = -ENOSPC;
5044 				goto err_unroll;
5045 			}
5046 
5047 			entry->chain_idx = chain_idx;
5048 			buf[recps].content.result_indx =
5049 				ICE_AQ_RECIPE_RESULT_EN |
5050 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5051 				 ICE_AQ_RECIPE_RESULT_DATA_M);
5052 			clear_bit(chain_idx, result_idx_bm);
5053 			chain_idx = find_first_bit(result_idx_bm,
5054 						   ICE_MAX_FV_WORDS);
5055 		}
5056 
5057 		/* fill recipe dependencies */
5058 		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
5059 			    ICE_MAX_NUM_RECIPES);
5060 		set_bit(buf[recps].recipe_indx,
5061 			(unsigned long *)buf[recps].recipe_bitmap);
5062 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5063 		recps++;
5064 	}
5065 
5066 	if (rm->n_grp_count == 1) {
5067 		rm->root_rid = buf[0].recipe_indx;
5068 		set_bit(buf[0].recipe_indx, rm->r_bitmap);
5069 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5070 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5071 			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5072 			       sizeof(buf[0].recipe_bitmap));
5073 		} else {
5074 			status = -EINVAL;
5075 			goto err_unroll;
5076 		}
5077 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
5078 		 * the recipe which is getting created if specified
5079 		 * by user. Usually any advanced switch filter, which results
5080 		 * into new extraction sequence, ended up creating a new recipe
5081 		 * of type ROOT and usually recipes are associated with profiles
5082 		 * Switch rule referreing newly created recipe, needs to have
5083 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
5084 		 * evaluation will not happen correctly. In other words, if
5085 		 * switch rule to be evaluated on priority basis, then recipe
5086 		 * needs to have priority, otherwise it will be evaluated last.
5087 		 */
5088 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
5089 	} else {
5090 		struct ice_recp_grp_entry *last_chain_entry;
5091 		u16 rid, i;
5092 
5093 		/* Allocate the last recipe that will chain the outcomes of the
5094 		 * other recipes together
5095 		 */
5096 		status = ice_alloc_recipe(hw, &rid);
5097 		if (status)
5098 			goto err_unroll;
5099 
5100 		buf[recps].recipe_indx = (u8)rid;
5101 		buf[recps].content.rid = (u8)rid;
5102 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5103 		/* the new entry created should also be part of rg_list to
5104 		 * make sure we have complete recipe
5105 		 */
5106 		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5107 						sizeof(*last_chain_entry),
5108 						GFP_KERNEL);
5109 		if (!last_chain_entry) {
5110 			status = -ENOMEM;
5111 			goto err_unroll;
5112 		}
5113 		last_chain_entry->rid = rid;
5114 		memset(&buf[recps].content.lkup_indx, 0,
5115 		       sizeof(buf[recps].content.lkup_indx));
5116 		/* All recipes use look-up index 0 to match switch ID. */
5117 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5118 		buf[recps].content.mask[0] =
5119 			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5120 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5121 			buf[recps].content.lkup_indx[i] =
5122 				ICE_AQ_RECIPE_LKUP_IGNORE;
5123 			buf[recps].content.mask[i] = 0;
5124 		}
5125 
5126 		i = 1;
5127 		/* update r_bitmap with the recp that is used for chaining */
5128 		set_bit(rid, rm->r_bitmap);
5129 		/* this is the recipe that chains all the other recipes so it
5130 		 * should not have a chaining ID to indicate the same
5131 		 */
5132 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5133 		list_for_each_entry(entry, &rm->rg_list, l_entry) {
5134 			last_chain_entry->fv_idx[i] = entry->chain_idx;
5135 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
5136 			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5137 			set_bit(entry->rid, rm->r_bitmap);
5138 		}
5139 		list_add(&last_chain_entry->l_entry, &rm->rg_list);
5140 		if (sizeof(buf[recps].recipe_bitmap) >=
5141 		    sizeof(rm->r_bitmap)) {
5142 			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5143 			       sizeof(buf[recps].recipe_bitmap));
5144 		} else {
5145 			status = -EINVAL;
5146 			goto err_unroll;
5147 		}
5148 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5149 
5150 		recps++;
5151 		rm->root_rid = (u8)rid;
5152 	}
5153 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5154 	if (status)
5155 		goto err_unroll;
5156 
5157 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5158 	ice_release_change_lock(hw);
5159 	if (status)
5160 		goto err_unroll;
5161 
5162 	/* Every recipe that just got created add it to the recipe
5163 	 * book keeping list
5164 	 */
5165 	list_for_each_entry(entry, &rm->rg_list, l_entry) {
5166 		struct ice_switch_info *sw = hw->switch_info;
5167 		bool is_root, idx_found = false;
5168 		struct ice_sw_recipe *recp;
5169 		u16 idx, buf_idx = 0;
5170 
5171 		/* find buffer index for copying some data */
5172 		for (idx = 0; idx < rm->n_grp_count; idx++)
5173 			if (buf[idx].recipe_indx == entry->rid) {
5174 				buf_idx = idx;
5175 				idx_found = true;
5176 			}
5177 
5178 		if (!idx_found) {
5179 			status = -EIO;
5180 			goto err_unroll;
5181 		}
5182 
5183 		recp = &sw->recp_list[entry->rid];
5184 		is_root = (rm->root_rid == entry->rid);
5185 		recp->is_root = is_root;
5186 
5187 		recp->root_rid = entry->rid;
5188 		recp->big_recp = (is_root && rm->n_grp_count > 1);
5189 
5190 		memcpy(&recp->ext_words, entry->r_group.pairs,
5191 		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5192 
5193 		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5194 		       sizeof(recp->r_bitmap));
5195 
5196 		/* Copy non-result fv index values and masks to recipe. This
5197 		 * call will also update the result recipe bitmask.
5198 		 */
5199 		ice_collect_result_idx(&buf[buf_idx], recp);
5200 
5201 		/* for non-root recipes, also copy to the root, this allows
5202 		 * easier matching of a complete chained recipe
5203 		 */
5204 		if (!is_root)
5205 			ice_collect_result_idx(&buf[buf_idx],
5206 					       &sw->recp_list[rm->root_rid]);
5207 
5208 		recp->n_ext_words = entry->r_group.n_val_pairs;
5209 		recp->chain_idx = entry->chain_idx;
5210 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5211 		recp->n_grp_count = rm->n_grp_count;
5212 		recp->tun_type = rm->tun_type;
5213 		recp->recp_created = true;
5214 	}
5215 	rm->root_buf = buf;
5216 	kfree(tmp);
5217 	return status;
5218 
5219 err_unroll:
5220 err_mem:
5221 	kfree(tmp);
5222 	devm_kfree(ice_hw_to_dev(hw), buf);
5223 	return status;
5224 }
5225 
5226 /**
5227  * ice_create_recipe_group - creates recipe group
5228  * @hw: pointer to hardware structure
5229  * @rm: recipe management list entry
5230  * @lkup_exts: lookup elements
5231  */
5232 static int
ice_create_recipe_group(struct ice_hw * hw,struct ice_sw_recipe * rm,struct ice_prot_lkup_ext * lkup_exts)5233 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5234 			struct ice_prot_lkup_ext *lkup_exts)
5235 {
5236 	u8 recp_count = 0;
5237 	int status;
5238 
5239 	rm->n_grp_count = 0;
5240 
5241 	/* Create recipes for words that are marked not done by packing them
5242 	 * as best fit.
5243 	 */
5244 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5245 					       &rm->rg_list, &recp_count);
5246 	if (!status) {
5247 		rm->n_grp_count += recp_count;
5248 		rm->n_ext_words = lkup_exts->n_val_words;
5249 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5250 		       sizeof(rm->ext_words));
5251 		memcpy(rm->word_masks, lkup_exts->field_mask,
5252 		       sizeof(rm->word_masks));
5253 	}
5254 
5255 	return status;
5256 }
5257 
5258 /**
5259  * ice_tun_type_match_word - determine if tun type needs a match mask
5260  * @tun_type: tunnel type
5261  * @mask: mask to be used for the tunnel
5262  */
ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type,u16 * mask)5263 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5264 {
5265 	switch (tun_type) {
5266 	case ICE_SW_TUN_GENEVE:
5267 	case ICE_SW_TUN_VXLAN:
5268 	case ICE_SW_TUN_NVGRE:
5269 	case ICE_SW_TUN_GTPU:
5270 	case ICE_SW_TUN_GTPC:
5271 		*mask = ICE_TUN_FLAG_MASK;
5272 		return true;
5273 
5274 	default:
5275 		*mask = 0;
5276 		return false;
5277 	}
5278 }
5279 
5280 /**
5281  * ice_add_special_words - Add words that are not protocols, such as metadata
5282  * @rinfo: other information regarding the rule e.g. priority and action info
5283  * @lkup_exts: lookup word structure
5284  * @dvm_ena: is double VLAN mode enabled
5285  */
5286 static int
ice_add_special_words(struct ice_adv_rule_info * rinfo,struct ice_prot_lkup_ext * lkup_exts,bool dvm_ena)5287 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5288 		      struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
5289 {
5290 	u16 mask;
5291 
5292 	/* If this is a tunneled packet, then add recipe index to match the
5293 	 * tunnel bit in the packet metadata flags.
5294 	 */
5295 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5296 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5297 			u8 word = lkup_exts->n_val_words++;
5298 
5299 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5300 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5301 			lkup_exts->field_mask[word] = mask;
5302 		} else {
5303 			return -ENOSPC;
5304 		}
5305 	}
5306 
5307 	if (rinfo->vlan_type != 0 && dvm_ena) {
5308 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5309 			u8 word = lkup_exts->n_val_words++;
5310 
5311 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5312 			lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
5313 			lkup_exts->field_mask[word] =
5314 					ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
5315 		} else {
5316 			return -ENOSPC;
5317 		}
5318 	}
5319 
5320 	return 0;
5321 }
5322 
5323 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5324  * @hw: pointer to hardware structure
5325  * @rinfo: other information regarding the rule e.g. priority and action info
5326  * @bm: pointer to memory for returning the bitmap of field vectors
5327  */
5328 static void
ice_get_compat_fv_bitmap(struct ice_hw * hw,struct ice_adv_rule_info * rinfo,unsigned long * bm)5329 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5330 			 unsigned long *bm)
5331 {
5332 	enum ice_prof_type prof_type;
5333 
5334 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5335 
5336 	switch (rinfo->tun_type) {
5337 	case ICE_NON_TUN:
5338 		prof_type = ICE_PROF_NON_TUN;
5339 		break;
5340 	case ICE_ALL_TUNNELS:
5341 		prof_type = ICE_PROF_TUN_ALL;
5342 		break;
5343 	case ICE_SW_TUN_GENEVE:
5344 	case ICE_SW_TUN_VXLAN:
5345 		prof_type = ICE_PROF_TUN_UDP;
5346 		break;
5347 	case ICE_SW_TUN_NVGRE:
5348 		prof_type = ICE_PROF_TUN_GRE;
5349 		break;
5350 	case ICE_SW_TUN_GTPU:
5351 		prof_type = ICE_PROF_TUN_GTPU;
5352 		break;
5353 	case ICE_SW_TUN_GTPC:
5354 		prof_type = ICE_PROF_TUN_GTPC;
5355 		break;
5356 	case ICE_SW_TUN_AND_NON_TUN:
5357 	default:
5358 		prof_type = ICE_PROF_ALL;
5359 		break;
5360 	}
5361 
5362 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5363 }
5364 
5365 /**
5366  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5367  * @hw: pointer to hardware structure
5368  * @lkups: lookup elements or match criteria for the advanced recipe, one
5369  *  structure per protocol header
5370  * @lkups_cnt: number of protocols
5371  * @rinfo: other information regarding the rule e.g. priority and action info
5372  * @rid: return the recipe ID of the recipe created
5373  */
5374 static int
ice_add_adv_recipe(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,u16 * rid)5375 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5376 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5377 {
5378 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5379 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5380 	struct ice_prot_lkup_ext *lkup_exts;
5381 	struct ice_recp_grp_entry *r_entry;
5382 	struct ice_sw_fv_list_entry *fvit;
5383 	struct ice_recp_grp_entry *r_tmp;
5384 	struct ice_sw_fv_list_entry *tmp;
5385 	struct ice_sw_recipe *rm;
5386 	int status = 0;
5387 	u8 i;
5388 
5389 	if (!lkups_cnt)
5390 		return -EINVAL;
5391 
5392 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5393 	if (!lkup_exts)
5394 		return -ENOMEM;
5395 
5396 	/* Determine the number of words to be matched and if it exceeds a
5397 	 * recipe's restrictions
5398 	 */
5399 	for (i = 0; i < lkups_cnt; i++) {
5400 		u16 count;
5401 
5402 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5403 			status = -EIO;
5404 			goto err_free_lkup_exts;
5405 		}
5406 
5407 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5408 		if (!count) {
5409 			status = -EIO;
5410 			goto err_free_lkup_exts;
5411 		}
5412 	}
5413 
5414 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5415 	if (!rm) {
5416 		status = -ENOMEM;
5417 		goto err_free_lkup_exts;
5418 	}
5419 
5420 	/* Get field vectors that contain fields extracted from all the protocol
5421 	 * headers being programmed.
5422 	 */
5423 	INIT_LIST_HEAD(&rm->fv_list);
5424 	INIT_LIST_HEAD(&rm->rg_list);
5425 
5426 	/* Get bitmap of field vectors (profiles) that are compatible with the
5427 	 * rule request; only these will be searched in the subsequent call to
5428 	 * ice_get_sw_fv_list.
5429 	 */
5430 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5431 
5432 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5433 	if (status)
5434 		goto err_unroll;
5435 
5436 	/* Create any special protocol/offset pairs, such as looking at tunnel
5437 	 * bits by extracting metadata
5438 	 */
5439 	status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
5440 	if (status)
5441 		goto err_unroll;
5442 
5443 	/* Group match words into recipes using preferred recipe grouping
5444 	 * criteria.
5445 	 */
5446 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5447 	if (status)
5448 		goto err_unroll;
5449 
5450 	/* set the recipe priority if specified */
5451 	rm->priority = (u8)rinfo->priority;
5452 
5453 	/* Find offsets from the field vector. Pick the first one for all the
5454 	 * recipes.
5455 	 */
5456 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5457 	if (status)
5458 		goto err_unroll;
5459 
5460 	/* get bitmap of all profiles the recipe will be associated with */
5461 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5462 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5463 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5464 		set_bit((u16)fvit->profile_id, profiles);
5465 	}
5466 
5467 	/* Look for a recipe which matches our requested fv / mask list */
5468 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5469 	if (*rid < ICE_MAX_NUM_RECIPES)
5470 		/* Success if found a recipe that match the existing criteria */
5471 		goto err_unroll;
5472 
5473 	rm->tun_type = rinfo->tun_type;
5474 	/* Recipe we need does not exist, add a recipe */
5475 	status = ice_add_sw_recipe(hw, rm, profiles);
5476 	if (status)
5477 		goto err_unroll;
5478 
5479 	/* Associate all the recipes created with all the profiles in the
5480 	 * common field vector.
5481 	 */
5482 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5483 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5484 		u16 j;
5485 
5486 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5487 						      (u8 *)r_bitmap, NULL);
5488 		if (status)
5489 			goto err_unroll;
5490 
5491 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5492 			  ICE_MAX_NUM_RECIPES);
5493 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5494 		if (status)
5495 			goto err_unroll;
5496 
5497 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5498 						      (u8 *)r_bitmap,
5499 						      NULL);
5500 		ice_release_change_lock(hw);
5501 
5502 		if (status)
5503 			goto err_unroll;
5504 
5505 		/* Update profile to recipe bitmap array */
5506 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5507 			    ICE_MAX_NUM_RECIPES);
5508 
5509 		/* Update recipe to profile bitmap array */
5510 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5511 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5512 	}
5513 
5514 	*rid = rm->root_rid;
5515 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5516 	       sizeof(*lkup_exts));
5517 err_unroll:
5518 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5519 		list_del(&r_entry->l_entry);
5520 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5521 	}
5522 
5523 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5524 		list_del(&fvit->list_entry);
5525 		devm_kfree(ice_hw_to_dev(hw), fvit);
5526 	}
5527 
5528 	if (rm->root_buf)
5529 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5530 
5531 	kfree(rm);
5532 
5533 err_free_lkup_exts:
5534 	kfree(lkup_exts);
5535 
5536 	return status;
5537 }
5538 
5539 /**
5540  * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5541  *
5542  * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5543  * @num_vlan: number of VLAN tags
5544  */
5545 static struct ice_dummy_pkt_profile *
ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile * dummy_pkt,u32 num_vlan)5546 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5547 			  u32 num_vlan)
5548 {
5549 	struct ice_dummy_pkt_profile *profile;
5550 	struct ice_dummy_pkt_offsets *offsets;
5551 	u32 buf_len, off, etype_off, i;
5552 	u8 *pkt;
5553 
5554 	if (num_vlan < 1 || num_vlan > 2)
5555 		return ERR_PTR(-EINVAL);
5556 
5557 	off = num_vlan * VLAN_HLEN;
5558 
5559 	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5560 		  dummy_pkt->offsets_len;
5561 	offsets = kzalloc(buf_len, GFP_KERNEL);
5562 	if (!offsets)
5563 		return ERR_PTR(-ENOMEM);
5564 
5565 	offsets[0] = dummy_pkt->offsets[0];
5566 	if (num_vlan == 2) {
5567 		offsets[1] = ice_dummy_qinq_packet_offsets[0];
5568 		offsets[2] = ice_dummy_qinq_packet_offsets[1];
5569 	} else if (num_vlan == 1) {
5570 		offsets[1] = ice_dummy_vlan_packet_offsets[0];
5571 	}
5572 
5573 	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5574 		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5575 		offsets[i + num_vlan].offset =
5576 			dummy_pkt->offsets[i].offset + off;
5577 	}
5578 	offsets[i + num_vlan] = dummy_pkt->offsets[i];
5579 
5580 	etype_off = dummy_pkt->offsets[1].offset;
5581 
5582 	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5583 		  dummy_pkt->pkt_len;
5584 	pkt = kzalloc(buf_len, GFP_KERNEL);
5585 	if (!pkt) {
5586 		kfree(offsets);
5587 		return ERR_PTR(-ENOMEM);
5588 	}
5589 
5590 	memcpy(pkt, dummy_pkt->pkt, etype_off);
5591 	memcpy(pkt + etype_off,
5592 	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5593 	       off);
5594 	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5595 	       dummy_pkt->pkt_len - etype_off);
5596 
5597 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5598 	if (!profile) {
5599 		kfree(offsets);
5600 		kfree(pkt);
5601 		return ERR_PTR(-ENOMEM);
5602 	}
5603 
5604 	profile->offsets = offsets;
5605 	profile->pkt = pkt;
5606 	profile->pkt_len = buf_len;
5607 	profile->match |= ICE_PKT_KMALLOC;
5608 
5609 	return profile;
5610 }
5611 
5612 /**
5613  * ice_find_dummy_packet - find dummy packet
5614  *
5615  * @lkups: lookup elements or match criteria for the advanced recipe, one
5616  *	   structure per protocol header
5617  * @lkups_cnt: number of protocols
5618  * @tun_type: tunnel type
5619  *
5620  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5621  */
5622 static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,enum ice_sw_tunnel_type tun_type)5623 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5624 		      enum ice_sw_tunnel_type tun_type)
5625 {
5626 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5627 	u32 match = 0, vlan_count = 0;
5628 	u16 i;
5629 
5630 	switch (tun_type) {
5631 	case ICE_SW_TUN_GTPC:
5632 		match |= ICE_PKT_TUN_GTPC;
5633 		break;
5634 	case ICE_SW_TUN_GTPU:
5635 		match |= ICE_PKT_TUN_GTPU;
5636 		break;
5637 	case ICE_SW_TUN_NVGRE:
5638 		match |= ICE_PKT_TUN_NVGRE;
5639 		break;
5640 	case ICE_SW_TUN_GENEVE:
5641 	case ICE_SW_TUN_VXLAN:
5642 		match |= ICE_PKT_TUN_UDP;
5643 		break;
5644 	default:
5645 		break;
5646 	}
5647 
5648 	for (i = 0; i < lkups_cnt; i++) {
5649 		if (lkups[i].type == ICE_UDP_ILOS)
5650 			match |= ICE_PKT_INNER_UDP;
5651 		else if (lkups[i].type == ICE_TCP_IL)
5652 			match |= ICE_PKT_INNER_TCP;
5653 		else if (lkups[i].type == ICE_IPV6_OFOS)
5654 			match |= ICE_PKT_OUTER_IPV6;
5655 		else if (lkups[i].type == ICE_VLAN_OFOS ||
5656 			 lkups[i].type == ICE_VLAN_EX)
5657 			vlan_count++;
5658 		else if (lkups[i].type == ICE_VLAN_IN)
5659 			vlan_count++;
5660 		else if (lkups[i].type == ICE_ETYPE_OL &&
5661 			 lkups[i].h_u.ethertype.ethtype_id ==
5662 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5663 			 lkups[i].m_u.ethertype.ethtype_id ==
5664 				cpu_to_be16(0xFFFF))
5665 			match |= ICE_PKT_OUTER_IPV6;
5666 		else if (lkups[i].type == ICE_ETYPE_IL &&
5667 			 lkups[i].h_u.ethertype.ethtype_id ==
5668 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5669 			 lkups[i].m_u.ethertype.ethtype_id ==
5670 				cpu_to_be16(0xFFFF))
5671 			match |= ICE_PKT_INNER_IPV6;
5672 		else if (lkups[i].type == ICE_IPV6_IL)
5673 			match |= ICE_PKT_INNER_IPV6;
5674 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5675 			match |= ICE_PKT_GTP_NOPAY;
5676 		else if (lkups[i].type == ICE_PPPOE) {
5677 			match |= ICE_PKT_PPPOE;
5678 			if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5679 			    htons(PPP_IPV6))
5680 				match |= ICE_PKT_OUTER_IPV6;
5681 		} else if (lkups[i].type == ICE_L2TPV3)
5682 			match |= ICE_PKT_L2TPV3;
5683 	}
5684 
5685 	while (ret->match && (match & ret->match) != ret->match)
5686 		ret++;
5687 
5688 	if (vlan_count != 0)
5689 		ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5690 
5691 	return ret;
5692 }
5693 
5694 /**
5695  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5696  *
5697  * @lkups: lookup elements or match criteria for the advanced recipe, one
5698  *	   structure per protocol header
5699  * @lkups_cnt: number of protocols
5700  * @s_rule: stores rule information from the match criteria
5701  * @profile: dummy packet profile (the template, its size and header offsets)
5702  */
5703 static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_sw_rule_lkup_rx_tx * s_rule,const struct ice_dummy_pkt_profile * profile)5704 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5705 			  struct ice_sw_rule_lkup_rx_tx *s_rule,
5706 			  const struct ice_dummy_pkt_profile *profile)
5707 {
5708 	u8 *pkt;
5709 	u16 i;
5710 
5711 	/* Start with a packet with a pre-defined/dummy content. Then, fill
5712 	 * in the header values to be looked up or matched.
5713 	 */
5714 	pkt = s_rule->hdr_data;
5715 
5716 	memcpy(pkt, profile->pkt, profile->pkt_len);
5717 
5718 	for (i = 0; i < lkups_cnt; i++) {
5719 		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5720 		enum ice_protocol_type type;
5721 		u16 offset = 0, len = 0, j;
5722 		bool found = false;
5723 
5724 		/* find the start of this layer; it should be found since this
5725 		 * was already checked when search for the dummy packet
5726 		 */
5727 		type = lkups[i].type;
5728 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5729 			if (type == offsets[j].type) {
5730 				offset = offsets[j].offset;
5731 				found = true;
5732 				break;
5733 			}
5734 		}
5735 		/* this should never happen in a correct calling sequence */
5736 		if (!found)
5737 			return -EINVAL;
5738 
5739 		switch (lkups[i].type) {
5740 		case ICE_MAC_OFOS:
5741 		case ICE_MAC_IL:
5742 			len = sizeof(struct ice_ether_hdr);
5743 			break;
5744 		case ICE_ETYPE_OL:
5745 		case ICE_ETYPE_IL:
5746 			len = sizeof(struct ice_ethtype_hdr);
5747 			break;
5748 		case ICE_VLAN_OFOS:
5749 		case ICE_VLAN_EX:
5750 		case ICE_VLAN_IN:
5751 			len = sizeof(struct ice_vlan_hdr);
5752 			break;
5753 		case ICE_IPV4_OFOS:
5754 		case ICE_IPV4_IL:
5755 			len = sizeof(struct ice_ipv4_hdr);
5756 			break;
5757 		case ICE_IPV6_OFOS:
5758 		case ICE_IPV6_IL:
5759 			len = sizeof(struct ice_ipv6_hdr);
5760 			break;
5761 		case ICE_TCP_IL:
5762 		case ICE_UDP_OF:
5763 		case ICE_UDP_ILOS:
5764 			len = sizeof(struct ice_l4_hdr);
5765 			break;
5766 		case ICE_SCTP_IL:
5767 			len = sizeof(struct ice_sctp_hdr);
5768 			break;
5769 		case ICE_NVGRE:
5770 			len = sizeof(struct ice_nvgre_hdr);
5771 			break;
5772 		case ICE_VXLAN:
5773 		case ICE_GENEVE:
5774 			len = sizeof(struct ice_udp_tnl_hdr);
5775 			break;
5776 		case ICE_GTP_NO_PAY:
5777 		case ICE_GTP:
5778 			len = sizeof(struct ice_udp_gtp_hdr);
5779 			break;
5780 		case ICE_PPPOE:
5781 			len = sizeof(struct ice_pppoe_hdr);
5782 			break;
5783 		case ICE_L2TPV3:
5784 			len = sizeof(struct ice_l2tpv3_sess_hdr);
5785 			break;
5786 		default:
5787 			return -EINVAL;
5788 		}
5789 
5790 		/* the length should be a word multiple */
5791 		if (len % ICE_BYTES_PER_WORD)
5792 			return -EIO;
5793 
5794 		/* We have the offset to the header start, the length, the
5795 		 * caller's header values and mask. Use this information to
5796 		 * copy the data into the dummy packet appropriately based on
5797 		 * the mask. Note that we need to only write the bits as
5798 		 * indicated by the mask to make sure we don't improperly write
5799 		 * over any significant packet data.
5800 		 */
5801 		for (j = 0; j < len / sizeof(u16); j++) {
5802 			u16 *ptr = (u16 *)(pkt + offset);
5803 			u16 mask = lkups[i].m_raw[j];
5804 
5805 			if (!mask)
5806 				continue;
5807 
5808 			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5809 		}
5810 	}
5811 
5812 	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5813 
5814 	return 0;
5815 }
5816 
5817 /**
5818  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5819  * @hw: pointer to the hardware structure
5820  * @tun_type: tunnel type
5821  * @pkt: dummy packet to fill in
5822  * @offsets: offset info for the dummy packet
5823  */
5824 static int
ice_fill_adv_packet_tun(struct ice_hw * hw,enum ice_sw_tunnel_type tun_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)5825 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5826 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5827 {
5828 	u16 open_port, i;
5829 
5830 	switch (tun_type) {
5831 	case ICE_SW_TUN_VXLAN:
5832 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5833 			return -EIO;
5834 		break;
5835 	case ICE_SW_TUN_GENEVE:
5836 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5837 			return -EIO;
5838 		break;
5839 	default:
5840 		/* Nothing needs to be done for this tunnel type */
5841 		return 0;
5842 	}
5843 
5844 	/* Find the outer UDP protocol header and insert the port number */
5845 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5846 		if (offsets[i].type == ICE_UDP_OF) {
5847 			struct ice_l4_hdr *hdr;
5848 			u16 offset;
5849 
5850 			offset = offsets[i].offset;
5851 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5852 			hdr->dst_port = cpu_to_be16(open_port);
5853 
5854 			return 0;
5855 		}
5856 	}
5857 
5858 	return -EIO;
5859 }
5860 
5861 /**
5862  * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5863  * @vlan_type: VLAN tag type
5864  * @pkt: dummy packet to fill in
5865  * @offsets: offset info for the dummy packet
5866  */
5867 static int
ice_fill_adv_packet_vlan(u16 vlan_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)5868 ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
5869 			 const struct ice_dummy_pkt_offsets *offsets)
5870 {
5871 	u16 i;
5872 
5873 	/* Find VLAN header and insert VLAN TPID */
5874 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5875 		if (offsets[i].type == ICE_VLAN_OFOS ||
5876 		    offsets[i].type == ICE_VLAN_EX) {
5877 			struct ice_vlan_hdr *hdr;
5878 			u16 offset;
5879 
5880 			offset = offsets[i].offset;
5881 			hdr = (struct ice_vlan_hdr *)&pkt[offset];
5882 			hdr->type = cpu_to_be16(vlan_type);
5883 
5884 			return 0;
5885 		}
5886 	}
5887 
5888 	return -EIO;
5889 }
5890 
5891 /**
5892  * ice_find_adv_rule_entry - Search a rule entry
5893  * @hw: pointer to the hardware structure
5894  * @lkups: lookup elements or match criteria for the advanced recipe, one
5895  *	   structure per protocol header
5896  * @lkups_cnt: number of protocols
5897  * @recp_id: recipe ID for which we are finding the rule
5898  * @rinfo: other information regarding the rule e.g. priority and action info
5899  *
5900  * Helper function to search for a given advance rule entry
5901  * Returns pointer to entry storing the rule if found
5902  */
5903 static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,u16 recp_id,struct ice_adv_rule_info * rinfo)5904 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5905 			u16 lkups_cnt, u16 recp_id,
5906 			struct ice_adv_rule_info *rinfo)
5907 {
5908 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5909 	struct ice_switch_info *sw = hw->switch_info;
5910 	int i;
5911 
5912 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5913 			    list_entry) {
5914 		bool lkups_matched = true;
5915 
5916 		if (lkups_cnt != list_itr->lkups_cnt)
5917 			continue;
5918 		for (i = 0; i < list_itr->lkups_cnt; i++)
5919 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5920 				   sizeof(*lkups))) {
5921 				lkups_matched = false;
5922 				break;
5923 			}
5924 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5925 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
5926 		    rinfo->vlan_type == list_itr->rule_info.vlan_type &&
5927 		    lkups_matched)
5928 			return list_itr;
5929 	}
5930 	return NULL;
5931 }
5932 
5933 /**
5934  * ice_adv_add_update_vsi_list
5935  * @hw: pointer to the hardware structure
5936  * @m_entry: pointer to current adv filter management list entry
5937  * @cur_fltr: filter information from the book keeping entry
5938  * @new_fltr: filter information with the new VSI to be added
5939  *
5940  * Call AQ command to add or update previously created VSI list with new VSI.
5941  *
5942  * Helper function to do book keeping associated with adding filter information
5943  * The algorithm to do the booking keeping is described below :
5944  * When a VSI needs to subscribe to a given advanced filter
5945  *	if only one VSI has been added till now
5946  *		Allocate a new VSI list and add two VSIs
5947  *		to this list using switch rule command
5948  *		Update the previously created switch rule with the
5949  *		newly created VSI list ID
5950  *	if a VSI list was previously created
5951  *		Add the new VSI to the previously created VSI list set
5952  *		using the update switch rule command
5953  */
5954 static int
ice_adv_add_update_vsi_list(struct ice_hw * hw,struct ice_adv_fltr_mgmt_list_entry * m_entry,struct ice_adv_rule_info * cur_fltr,struct ice_adv_rule_info * new_fltr)5955 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5956 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5957 			    struct ice_adv_rule_info *cur_fltr,
5958 			    struct ice_adv_rule_info *new_fltr)
5959 {
5960 	u16 vsi_list_id = 0;
5961 	int status;
5962 
5963 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5964 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5965 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5966 		return -EOPNOTSUPP;
5967 
5968 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5969 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5970 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5971 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5972 		return -EOPNOTSUPP;
5973 
5974 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5975 		 /* Only one entry existed in the mapping and it was not already
5976 		  * a part of a VSI list. So, create a VSI list with the old and
5977 		  * new VSIs.
5978 		  */
5979 		struct ice_fltr_info tmp_fltr;
5980 		u16 vsi_handle_arr[2];
5981 
5982 		/* A rule already exists with the new VSI being added */
5983 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5984 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5985 			return -EEXIST;
5986 
5987 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5988 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5989 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5990 						  &vsi_list_id,
5991 						  ICE_SW_LKUP_LAST);
5992 		if (status)
5993 			return status;
5994 
5995 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5996 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5997 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5998 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5999 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6000 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6001 
6002 		/* Update the previous switch rule of "forward to VSI" to
6003 		 * "fwd to VSI list"
6004 		 */
6005 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6006 		if (status)
6007 			return status;
6008 
6009 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6010 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6011 		m_entry->vsi_list_info =
6012 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6013 						vsi_list_id);
6014 	} else {
6015 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6016 
6017 		if (!m_entry->vsi_list_info)
6018 			return -EIO;
6019 
6020 		/* A rule already exists with the new VSI being added */
6021 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
6022 			return 0;
6023 
6024 		/* Update the previously created VSI list set with
6025 		 * the new VSI ID passed in
6026 		 */
6027 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6028 
6029 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6030 						  vsi_list_id, false,
6031 						  ice_aqc_opc_update_sw_rules,
6032 						  ICE_SW_LKUP_LAST);
6033 		/* update VSI list mapping info with new VSI ID */
6034 		if (!status)
6035 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
6036 	}
6037 	if (!status)
6038 		m_entry->vsi_count++;
6039 	return status;
6040 }
6041 
6042 /**
6043  * ice_add_adv_rule - helper function to create an advanced switch rule
6044  * @hw: pointer to the hardware structure
6045  * @lkups: information on the words that needs to be looked up. All words
6046  * together makes one recipe
6047  * @lkups_cnt: num of entries in the lkups array
6048  * @rinfo: other information related to the rule that needs to be programmed
6049  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6050  *               ignored is case of error.
6051  *
6052  * This function can program only 1 rule at a time. The lkups is used to
6053  * describe the all the words that forms the "lookup" portion of the recipe.
6054  * These words can span multiple protocols. Callers to this function need to
6055  * pass in a list of protocol headers with lookup information along and mask
6056  * that determines which words are valid from the given protocol header.
6057  * rinfo describes other information related to this rule such as forwarding
6058  * IDs, priority of this rule, etc.
6059  */
6060 int
ice_add_adv_rule(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,struct ice_rule_query_data * added_entry)6061 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6062 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6063 		 struct ice_rule_query_data *added_entry)
6064 {
6065 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6066 	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
6067 	const struct ice_dummy_pkt_profile *profile;
6068 	u16 rid = 0, i, rule_buf_sz, vsi_handle;
6069 	struct list_head *rule_head;
6070 	struct ice_switch_info *sw;
6071 	u16 word_cnt;
6072 	u32 act = 0;
6073 	int status;
6074 	u8 q_rgn;
6075 
6076 	/* Initialize profile to result index bitmap */
6077 	if (!hw->switch_info->prof_res_bm_init) {
6078 		hw->switch_info->prof_res_bm_init = 1;
6079 		ice_init_prof_result_bm(hw);
6080 	}
6081 
6082 	if (!lkups_cnt)
6083 		return -EINVAL;
6084 
6085 	/* get # of words we need to match */
6086 	word_cnt = 0;
6087 	for (i = 0; i < lkups_cnt; i++) {
6088 		u16 j;
6089 
6090 		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6091 			if (lkups[i].m_raw[j])
6092 				word_cnt++;
6093 	}
6094 
6095 	if (!word_cnt)
6096 		return -EINVAL;
6097 
6098 	if (word_cnt > ICE_MAX_CHAIN_WORDS)
6099 		return -ENOSPC;
6100 
6101 	/* locate a dummy packet */
6102 	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6103 	if (IS_ERR(profile))
6104 		return PTR_ERR(profile);
6105 
6106 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6107 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6108 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6109 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
6110 		status = -EIO;
6111 		goto free_pkt_profile;
6112 	}
6113 
6114 	vsi_handle = rinfo->sw_act.vsi_handle;
6115 	if (!ice_is_vsi_valid(hw, vsi_handle)) {
6116 		status =  -EINVAL;
6117 		goto free_pkt_profile;
6118 	}
6119 
6120 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6121 		rinfo->sw_act.fwd_id.hw_vsi_id =
6122 			ice_get_hw_vsi_num(hw, vsi_handle);
6123 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
6124 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6125 
6126 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6127 	if (status)
6128 		goto free_pkt_profile;
6129 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6130 	if (m_entry) {
6131 		/* we have to add VSI to VSI_LIST and increment vsi_count.
6132 		 * Also Update VSI list so that we can change forwarding rule
6133 		 * if the rule already exists, we will check if it exists with
6134 		 * same vsi_id, if not then add it to the VSI list if it already
6135 		 * exists if not then create a VSI list and add the existing VSI
6136 		 * ID and the new VSI ID to the list
6137 		 * We will add that VSI to the list
6138 		 */
6139 		status = ice_adv_add_update_vsi_list(hw, m_entry,
6140 						     &m_entry->rule_info,
6141 						     rinfo);
6142 		if (added_entry) {
6143 			added_entry->rid = rid;
6144 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6145 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6146 		}
6147 		goto free_pkt_profile;
6148 	}
6149 	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6150 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6151 	if (!s_rule) {
6152 		status = -ENOMEM;
6153 		goto free_pkt_profile;
6154 	}
6155 	if (!rinfo->flags_info.act_valid) {
6156 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
6157 		act |= ICE_SINGLE_ACT_LB_ENABLE;
6158 	} else {
6159 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6160 						ICE_SINGLE_ACT_LB_ENABLE);
6161 	}
6162 
6163 	switch (rinfo->sw_act.fltr_act) {
6164 	case ICE_FWD_TO_VSI:
6165 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6166 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6167 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6168 		break;
6169 	case ICE_FWD_TO_Q:
6170 		act |= ICE_SINGLE_ACT_TO_Q;
6171 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6172 		       ICE_SINGLE_ACT_Q_INDEX_M;
6173 		break;
6174 	case ICE_FWD_TO_QGRP:
6175 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6176 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6177 		act |= ICE_SINGLE_ACT_TO_Q;
6178 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6179 		       ICE_SINGLE_ACT_Q_INDEX_M;
6180 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6181 		       ICE_SINGLE_ACT_Q_REGION_M;
6182 		break;
6183 	case ICE_DROP_PACKET:
6184 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6185 		       ICE_SINGLE_ACT_VALID_BIT;
6186 		break;
6187 	default:
6188 		status = -EIO;
6189 		goto err_ice_add_adv_rule;
6190 	}
6191 
6192 	/* set the rule LOOKUP type based on caller specified 'Rx'
6193 	 * instead of hardcoding it to be either LOOKUP_TX/RX
6194 	 *
6195 	 * for 'Rx' set the source to be the port number
6196 	 * for 'Tx' set the source to be the source HW VSI number (determined
6197 	 * by caller)
6198 	 */
6199 	if (rinfo->rx) {
6200 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6201 		s_rule->src = cpu_to_le16(hw->port_info->lport);
6202 	} else {
6203 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6204 		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6205 	}
6206 
6207 	s_rule->recipe_id = cpu_to_le16(rid);
6208 	s_rule->act = cpu_to_le32(act);
6209 
6210 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6211 	if (status)
6212 		goto err_ice_add_adv_rule;
6213 
6214 	if (rinfo->tun_type != ICE_NON_TUN &&
6215 	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6216 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6217 						 s_rule->hdr_data,
6218 						 profile->offsets);
6219 		if (status)
6220 			goto err_ice_add_adv_rule;
6221 	}
6222 
6223 	if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
6224 		status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
6225 						  s_rule->hdr_data,
6226 						  profile->offsets);
6227 		if (status)
6228 			goto err_ice_add_adv_rule;
6229 	}
6230 
6231 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6232 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6233 				 NULL);
6234 	if (status)
6235 		goto err_ice_add_adv_rule;
6236 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6237 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
6238 				GFP_KERNEL);
6239 	if (!adv_fltr) {
6240 		status = -ENOMEM;
6241 		goto err_ice_add_adv_rule;
6242 	}
6243 
6244 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6245 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6246 	if (!adv_fltr->lkups) {
6247 		status = -ENOMEM;
6248 		goto err_ice_add_adv_rule;
6249 	}
6250 
6251 	adv_fltr->lkups_cnt = lkups_cnt;
6252 	adv_fltr->rule_info = *rinfo;
6253 	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6254 	sw = hw->switch_info;
6255 	sw->recp_list[rid].adv_rule = true;
6256 	rule_head = &sw->recp_list[rid].filt_rules;
6257 
6258 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6259 		adv_fltr->vsi_count = 1;
6260 
6261 	/* Add rule entry to book keeping list */
6262 	list_add(&adv_fltr->list_entry, rule_head);
6263 	if (added_entry) {
6264 		added_entry->rid = rid;
6265 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6266 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6267 	}
6268 err_ice_add_adv_rule:
6269 	if (status && adv_fltr) {
6270 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6271 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
6272 	}
6273 
6274 	kfree(s_rule);
6275 
6276 free_pkt_profile:
6277 	if (profile->match & ICE_PKT_KMALLOC) {
6278 		kfree(profile->offsets);
6279 		kfree(profile->pkt);
6280 		kfree(profile);
6281 	}
6282 
6283 	return status;
6284 }
6285 
6286 /**
6287  * ice_replay_vsi_fltr - Replay filters for requested VSI
6288  * @hw: pointer to the hardware structure
6289  * @vsi_handle: driver VSI handle
6290  * @recp_id: Recipe ID for which rules need to be replayed
6291  * @list_head: list for which filters need to be replayed
6292  *
6293  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6294  * It is required to pass valid VSI handle.
6295  */
6296 static int
ice_replay_vsi_fltr(struct ice_hw * hw,u16 vsi_handle,u8 recp_id,struct list_head * list_head)6297 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6298 		    struct list_head *list_head)
6299 {
6300 	struct ice_fltr_mgmt_list_entry *itr;
6301 	int status = 0;
6302 	u16 hw_vsi_id;
6303 
6304 	if (list_empty(list_head))
6305 		return status;
6306 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6307 
6308 	list_for_each_entry(itr, list_head, list_entry) {
6309 		struct ice_fltr_list_entry f_entry;
6310 
6311 		f_entry.fltr_info = itr->fltr_info;
6312 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6313 		    itr->fltr_info.vsi_handle == vsi_handle) {
6314 			/* update the src in case it is VSI num */
6315 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6316 				f_entry.fltr_info.src = hw_vsi_id;
6317 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6318 			if (status)
6319 				goto end;
6320 			continue;
6321 		}
6322 		if (!itr->vsi_list_info ||
6323 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6324 			continue;
6325 		/* Clearing it so that the logic can add it back */
6326 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6327 		f_entry.fltr_info.vsi_handle = vsi_handle;
6328 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6329 		/* update the src in case it is VSI num */
6330 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6331 			f_entry.fltr_info.src = hw_vsi_id;
6332 		if (recp_id == ICE_SW_LKUP_VLAN)
6333 			status = ice_add_vlan_internal(hw, &f_entry);
6334 		else
6335 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6336 		if (status)
6337 			goto end;
6338 	}
6339 end:
6340 	return status;
6341 }
6342 
6343 /**
6344  * ice_adv_rem_update_vsi_list
6345  * @hw: pointer to the hardware structure
6346  * @vsi_handle: VSI handle of the VSI to remove
6347  * @fm_list: filter management entry for which the VSI list management needs to
6348  *	     be done
6349  */
6350 static int
ice_adv_rem_update_vsi_list(struct ice_hw * hw,u16 vsi_handle,struct ice_adv_fltr_mgmt_list_entry * fm_list)6351 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6352 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
6353 {
6354 	struct ice_vsi_list_map_info *vsi_list_info;
6355 	enum ice_sw_lkup_type lkup_type;
6356 	u16 vsi_list_id;
6357 	int status;
6358 
6359 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6360 	    fm_list->vsi_count == 0)
6361 		return -EINVAL;
6362 
6363 	/* A rule with the VSI being removed does not exist */
6364 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6365 		return -ENOENT;
6366 
6367 	lkup_type = ICE_SW_LKUP_LAST;
6368 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6369 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6370 					  ice_aqc_opc_update_sw_rules,
6371 					  lkup_type);
6372 	if (status)
6373 		return status;
6374 
6375 	fm_list->vsi_count--;
6376 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6377 	vsi_list_info = fm_list->vsi_list_info;
6378 	if (fm_list->vsi_count == 1) {
6379 		struct ice_fltr_info tmp_fltr;
6380 		u16 rem_vsi_handle;
6381 
6382 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6383 						ICE_MAX_VSI);
6384 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6385 			return -EIO;
6386 
6387 		/* Make sure VSI list is empty before removing it below */
6388 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6389 						  vsi_list_id, true,
6390 						  ice_aqc_opc_update_sw_rules,
6391 						  lkup_type);
6392 		if (status)
6393 			return status;
6394 
6395 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6396 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6397 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6398 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6399 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6400 		tmp_fltr.fwd_id.hw_vsi_id =
6401 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6402 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6403 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6404 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6405 
6406 		/* Update the previous switch rule of "MAC forward to VSI" to
6407 		 * "MAC fwd to VSI list"
6408 		 */
6409 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6410 		if (status) {
6411 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6412 				  tmp_fltr.fwd_id.hw_vsi_id, status);
6413 			return status;
6414 		}
6415 		fm_list->vsi_list_info->ref_cnt--;
6416 
6417 		/* Remove the VSI list since it is no longer used */
6418 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6419 		if (status) {
6420 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6421 				  vsi_list_id, status);
6422 			return status;
6423 		}
6424 
6425 		list_del(&vsi_list_info->list_entry);
6426 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6427 		fm_list->vsi_list_info = NULL;
6428 	}
6429 
6430 	return status;
6431 }
6432 
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 *
 * Return: 0 on success (including when the rule is already gone),
 * -EIO on invalid/empty lookup words, -EINVAL when no matching recipe
 * exists, -ENOMEM on allocation failure, or an error from the firmware
 * admin-queue removal.
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	/* Rebuild the protocol/offset extraction words from the caller's
	 * lookups so the owning recipe can be located below.
	 */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		/* zero valid words means the lookup contributes nothing */
		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): lookup happens before taking rule_lock — presumably
	 * callers serialize rule add/remove; confirm against call sites.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* single-VSI rule: the HW rule itself must be deleted */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still reference the shared VSI list; just drop
		 * this VSI from the list and keep the HW rule
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		/* last VSI on the list: update first, then delete the HW
		 * rule once no VSI references remain
		 */
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_sw_rule_lkup_rx_tx *s_rule;
		u16 rule_buf_sz;

		/* removal only needs the rule header, no packet data */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT means FW no longer has the rule; still clean up
		 * the SW bookkeeping entry
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}
6536 
6537 /**
6538  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6539  * @hw: pointer to the hardware structure
6540  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6541  *
6542  * This function is used to remove 1 rule at a time. The removal is based on
6543  * the remove_entry parameter. This function will remove rule for a given
6544  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6545  */
6546 int
ice_rem_adv_rule_by_id(struct ice_hw * hw,struct ice_rule_query_data * remove_entry)6547 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6548 		       struct ice_rule_query_data *remove_entry)
6549 {
6550 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6551 	struct list_head *list_head;
6552 	struct ice_adv_rule_info rinfo;
6553 	struct ice_switch_info *sw;
6554 
6555 	sw = hw->switch_info;
6556 	if (!sw->recp_list[remove_entry->rid].recp_created)
6557 		return -EINVAL;
6558 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6559 	list_for_each_entry(list_itr, list_head, list_entry) {
6560 		if (list_itr->rule_info.fltr_rule_id ==
6561 		    remove_entry->rule_id) {
6562 			rinfo = list_itr->rule_info;
6563 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6564 			return ice_rem_adv_rule(hw, list_itr->lkups,
6565 						list_itr->lkups_cnt, &rinfo);
6566 		}
6567 	}
6568 	/* either list is empty or unable to find rule */
6569 	return -ENOENT;
6570 }
6571 
6572 /**
6573  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6574  *                            given VSI handle
6575  * @hw: pointer to the hardware structure
6576  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6577  *
6578  * This function is used to remove all the rules for a given VSI and as soon
6579  * as removing a rule fails, it will return immediately with the error code,
6580  * else it will return success.
6581  */
ice_rem_adv_rule_for_vsi(struct ice_hw * hw,u16 vsi_handle)6582 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6583 {
6584 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6585 	struct ice_vsi_list_map_info *map_info;
6586 	struct ice_adv_rule_info rinfo;
6587 	struct list_head *list_head;
6588 	struct ice_switch_info *sw;
6589 	int status;
6590 	u8 rid;
6591 
6592 	sw = hw->switch_info;
6593 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6594 		if (!sw->recp_list[rid].recp_created)
6595 			continue;
6596 		if (!sw->recp_list[rid].adv_rule)
6597 			continue;
6598 
6599 		list_head = &sw->recp_list[rid].filt_rules;
6600 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6601 					 list_entry) {
6602 			rinfo = list_itr->rule_info;
6603 
6604 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6605 				map_info = list_itr->vsi_list_info;
6606 				if (!map_info)
6607 					continue;
6608 
6609 				if (!test_bit(vsi_handle, map_info->vsi_map))
6610 					continue;
6611 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6612 				continue;
6613 			}
6614 
6615 			rinfo.sw_act.vsi_handle = vsi_handle;
6616 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6617 						  list_itr->lkups_cnt, &rinfo);
6618 			if (status)
6619 				return status;
6620 		}
6621 	}
6622 	return 0;
6623 }
6624 
6625 /**
6626  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6627  * @hw: pointer to the hardware structure
6628  * @vsi_handle: driver VSI handle
6629  * @list_head: list for which filters need to be replayed
6630  *
6631  * Replay the advanced rule for the given VSI.
6632  */
6633 static int
ice_replay_vsi_adv_rule(struct ice_hw * hw,u16 vsi_handle,struct list_head * list_head)6634 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6635 			struct list_head *list_head)
6636 {
6637 	struct ice_rule_query_data added_entry = { 0 };
6638 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6639 	int status = 0;
6640 
6641 	if (list_empty(list_head))
6642 		return status;
6643 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6644 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6645 		u16 lk_cnt = adv_fltr->lkups_cnt;
6646 
6647 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6648 			continue;
6649 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6650 					  &added_entry);
6651 		if (status)
6652 			break;
6653 	}
6654 	return status;
6655 }
6656 
6657 /**
6658  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6659  * @hw: pointer to the hardware structure
6660  * @vsi_handle: driver VSI handle
6661  *
6662  * Replays filters for requested VSI via vsi_handle.
6663  */
ice_replay_vsi_all_fltr(struct ice_hw * hw,u16 vsi_handle)6664 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6665 {
6666 	struct ice_switch_info *sw = hw->switch_info;
6667 	int status;
6668 	u8 i;
6669 
6670 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6671 		struct list_head *head;
6672 
6673 		head = &sw->recp_list[i].filt_replay_rules;
6674 		if (!sw->recp_list[i].adv_rule)
6675 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6676 		else
6677 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6678 		if (status)
6679 			return status;
6680 	}
6681 	return status;
6682 }
6683 
6684 /**
6685  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6686  * @hw: pointer to the HW struct
6687  *
6688  * Deletes the filter replay rules.
6689  */
ice_rm_all_sw_replay_rule_info(struct ice_hw * hw)6690 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6691 {
6692 	struct ice_switch_info *sw = hw->switch_info;
6693 	u8 i;
6694 
6695 	if (!sw)
6696 		return;
6697 
6698 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6699 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6700 			struct list_head *l_head;
6701 
6702 			l_head = &sw->recp_list[i].filt_replay_rules;
6703 			if (!sw->recp_list[i].adv_rule)
6704 				ice_rem_sw_rule_info(hw, l_head);
6705 			else
6706 				ice_rem_adv_rule_info(hw, l_head);
6707 		}
6708 	}
6709 }
6710