// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
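
/* Write the key layer bitmap into both the exact and mask halves of the
 * meta/TCI section; the mask ID field is initialised to all ones here.
 */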
void
nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
			struct nfp_flower_meta_tci *msk, u8 key_type)
{
	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;
}
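
/* Compile the single-VLAN TCI match: the PRESENT bit plus the PCP and
 * VID fields from the TC VLAN match, with the exact half pre-masked so
 * only bits covered by the mask are set.
 */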
void
nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
		       struct nfp_flower_meta_tci *msk,
		       struct flow_rule *rule)
{
	u16 msk_tci, key_tci;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);

		msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);

		ext->tci |= cpu_to_be16((key_tci & msk_tci));
		msk->tci |= cpu_to_be16(msk_tci);
	}
}
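
/* Zero the meta/TCI section, then compile the metadata and, unless the
 * firmware supports QinQ (in which case VLANs live in their own key
 * layer), the outermost VLAN TCI.
 */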
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	nfp_flower_compile_meta(ext, msk, key_type);

	if (!qinq_sup)
		nfp_flower_compile_tci(ext, msk, rule);
}
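
/* Write the second key layer bitmap, used when extended metadata is
 * present in the key.
 */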
void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
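
/* Compile the ingress port section. The mask half is always an exact
 * match; for tunnel matches the port encodes the tunnel type, otherwise
 * a valid firmware port ID is required.
 */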
int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
			return -EOPNOTSUPP;
		}
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}
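
/* Compile Ethernet address matches byte by byte. Key bits are OR-ed in
 * only where the mask was previously clear, so the helper can be applied
 * repeatedly (e.g. when merging rules) without clobbering existing bits.
 */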
void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;
		u8 tmp;
		int i;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		for (i = 0; i < ETH_ALEN; i++) {
			tmp = match.key->dst[i] & match.mask->dst[i];
			ext->mac_dst[i] |= tmp & (~msk->mac_dst[i]);
			msk->mac_dst[i] |= match.mask->dst[i];

			tmp = match.key->src[i] & match.mask->src[i];
			ext->mac_src[i] |= tmp & (~msk->mac_src[i]);
			msk->mac_src[i] |= match.mask->src[i];
		}
	}
}
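
/* Compile the MPLS match. Only the first label stack entry can be
 * matched; a bare MPLS ether type with no field match still sets the
 * MPLS_Q presence bit.
 */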
int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
			struct nfp_flower_mac_mpls *msk,
			struct flow_rule *rule,
			struct netlink_ext_ack *extack)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 key_mpls, msk_mpls;

		flow_rule_match_mpls(rule, &match);

		/* Only support matching the first LSE */
		if (match.mask->used_lses != 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: invalid LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				      match.key->ls[0].mpls_label) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				      match.key->ls[0].mpls_tc) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				      match.key->ls[0].mpls_bos) |
			   NFP_FLOWER_MASK_MPLS_Q;

		msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				      match.mask->ls[0].mpls_label) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				      match.mask->ls[0].mpls_tc) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				      match.mask->ls[0].mpls_bos) |
			   NFP_FLOWER_MASK_MPLS_Q;

		ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
		msk->mpls_lse |= cpu_to_be32(msk_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for an MPLS ether type and set the
		 * NFP_FLOWER_MASK_MPLS_Q bit, which indicates an MPLS
		 * ether type without any MPLS fields matched.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}

	return 0;
}
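
/* Zero the MAC/MPLS section, then compile the Ethernet and MPLS
 * matches into it.
 */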
static int
nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
			    struct nfp_flower_mac_mpls *msk,
			    struct flow_rule *rule,
			    struct netlink_ext_ack *extack)
{
	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	nfp_flower_compile_mac(ext, msk, rule);

	return nfp_flower_compile_mpls(ext, msk, rule, extack);
}
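
/* Compile layer 4 source and destination port matches. */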
void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;
		__be16 tmp;

		flow_rule_match_ports(rule, &match);

		tmp = match.key->src & match.mask->src;
		ext->port_src |= tmp & (~msk->port_src);
		msk->port_src |= match.mask->src;

		tmp = match.key->dst & match.mask->dst;
		ext->port_dst |= tmp & (~msk->port_dst);
		msk->port_dst |= match.mask->dst;
	}
}
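
/* Compile the IP extension fields shared by the IPv4 and IPv6 sections:
 * protocol, TOS, TTL, individual TCP flags and fragmentation state.
 */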
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto |= match.key->ip_proto & match.mask->ip_proto;
		msk->proto |= match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;
		u8 tmp;

		flow_rule_match_ip(rule, &match);

		tmp = match.key->tos & match.mask->tos;
		ext->tos |= tmp & (~msk->tos);
		msk->tos |= match.mask->tos;

		tmp = match.key->ttl & match.mask->ttl;
		ext->ttl |= tmp & (~msk->ttl);
		msk->ttl |= match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}
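
/* Pack one VLAN header's TCI and TPID into either the outer or the
 * inner slot of the QinQ VLAN section.
 */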
static void
nfp_flower_fill_vlan(struct flow_match_vlan *match,
		     struct nfp_flower_vlan *ext,
		     struct nfp_flower_vlan *msk, bool outer_vlan)
{
	struct flow_dissector_key_vlan *mask = match->mask;
	struct flow_dissector_key_vlan *key = match->key;
	u16 msk_tci, key_tci;

	key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
	key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
			      key->vlan_priority) |
		   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
			      key->vlan_id);
	msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
	msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
			      mask->vlan_priority) |
		   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
			      mask->vlan_id);

	if (outer_vlan) {
		ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
		ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
		msk->outer_tci |= cpu_to_be16(msk_tci);
		msk->outer_tpid |= mask->vlan_tpid;
	} else {
		ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
		ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
		msk->inner_tci |= cpu_to_be16(msk_tci);
		msk->inner_tpid |= mask->vlan_tpid;
	}
}
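
/* Compile the QinQ VLAN section: the VLAN key carries the outer tag,
 * the CVLAN key the inner tag.
 */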
void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
			struct nfp_flower_vlan *msk,
			struct flow_rule *rule)
{
	struct flow_match_vlan match;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_rule_match_vlan(rule, &match);
		nfp_flower_fill_vlan(&match, ext, msk, true);
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		flow_rule_match_cvlan(rule, &match);
		nfp_flower_fill_vlan(&match, ext, msk, false);
	}
}
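
/* Compile the IPv4 section: addresses plus the shared IP extension
 * fields.
 */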
void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;
		__be32 tmp;

		flow_rule_match_ipv4_addrs(rule, &match);

		tmp = match.key->src & match.mask->src;
		ext->ipv4_src |= tmp & (~msk->ipv4_src);
		msk->ipv4_src |= match.mask->src;

		tmp = match.key->dst & match.mask->dst;
		ext->ipv4_dst |= tmp & (~msk->ipv4_dst);
		msk->ipv4_dst |= match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
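
/* Compile the IPv6 section: addresses, accumulated byte by byte, plus
 * the shared IP extension fields.
 */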
void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;
		u8 tmp;
		int i;

		flow_rule_match_ipv6_addrs(rule, &match);
		for (i = 0; i < sizeof(ext->ipv6_src); i++) {
			tmp = match.key->src.s6_addr[i] &
			      match.mask->src.s6_addr[i];
			ext->ipv6_src.s6_addr[i] |= tmp &
						    (~msk->ipv6_src.s6_addr[i]);
			msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];

			tmp = match.key->dst.s6_addr[i] &
			      match.mask->dst.s6_addr[i];
			ext->ipv6_dst.s6_addr[i] |= tmp &
						    (~msk->ipv6_dst.s6_addr[i]);
			msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
		}
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
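
/* Copy the masked GENEVE option TLV bytes straight into the key. */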
void
nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
{
	struct flow_match_enc_opts match;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		flow_rule_match_enc_opts(rule, &match);

		for (i = 0; i < match.mask->len; i++) {
			ext[i] |= match.key->data[i] & match.mask->data[i];
			msk[i] |= match.mask->data[i];
		}
	}
}
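
/* Compile outer IPv4 addresses for tunnel matches. */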
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
				  struct nfp_flower_tun_ipv4 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->src |= match.key->src & match.mask->src;
		ext->dst |= match.key->dst & match.mask->dst;
		msk->src |= match.mask->src;
		msk->dst |= match.mask->dst;
	}
}
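
/* Compile outer IPv6 addresses for tunnel matches. */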
static void
nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
				  struct nfp_flower_tun_ipv6 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;
		int i;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		for (i = 0; i < sizeof(ext->src); i++) {
			ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
					       match.mask->src.s6_addr[i];
			ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
					       match.mask->dst.s6_addr[i];
			msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
			msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
		}
	}
}
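
/* Compile outer IP TOS and TTL for tunnel matches. */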
static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
			      struct nfp_flower_tun_ip_ext *msk,
			      struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos |= match.key->tos & match.mask->tos;
		ext->ttl |= match.key->ttl & match.mask->ttl;
		msk->tos |= match.mask->tos;
		msk->ttl |= match.mask->ttl;
	}
}
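
/* Compile the VNI for UDP tunnels; the 24-bit VNI is shifted up by
 * NFP_FL_TUN_VNI_OFFSET within the 32-bit tunnel ID field.
 */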
static void
nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
			       struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 vni;

		flow_rule_match_enc_keyid(rule, &match);
		vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
		      NFP_FL_TUN_VNI_OFFSET;
		*key |= cpu_to_be32(vni);
		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		*key_msk |= cpu_to_be32(vni);
	}
}
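
/* Compile the GRE tunnel key; matching on a key ID implies the
 * key-present flag in the GRE flags field.
 */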
static void
nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
			       __be16 *flags_msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		*key |= match.key->keyid & match.mask->keyid;
		*key_msk |= match.mask->keyid;

		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
	}
}
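
/* Compile the IPv4 GRE tunnel section: fixed NVGRE ethertype, outer
 * addresses, IP extension fields and GRE key.
 */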
void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
				struct nfp_flower_ipv4_gre_tun *msk,
				struct flow_rule *rule)
{
	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}
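
/* Compile the IPv4 UDP tunnel section: outer addresses, IP extension
 * fields and tunnel ID.
 */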
void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct flow_rule *rule)
{
	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
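
/* Compile the IPv6 UDP tunnel section: outer addresses, IP extension
 * fields and tunnel ID.
 */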
void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
				struct nfp_flower_ipv6_udp_tun *msk,
				struct flow_rule *rule)
{
	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
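
/* Compile the IPv6 GRE tunnel section: fixed NVGRE ethertype, outer
 * addresses, IP extension fields and GRE key.
 */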
void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
				struct nfp_flower_ipv6_gre_tun *msk,
				struct flow_rule *rule)
{
	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}
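
/* Compile the full match key. Each layer requested in key_ls is
 * compiled in turn while the ext and msk cursors advance through the
 * unmasked_data and mask_data buffers; tunnel destination addresses are
 * additionally registered with the tunnel offload code.
 */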
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_rule *rule,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	bool qinq_sup;
	u32 port_id;
	int ext_len;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    rule, key_ls->key_layer, qinq_sup);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
						  (struct nfp_flower_mac_mpls *)msk,
						  rule, extack);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 rule);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
					(struct nfp_flower_vlan *)msk,
					rule);
		ext += sizeof(struct nfp_flower_vlan);
		msk += sizeof(struct nfp_flower_vlan);
	}

	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_gre_tun((void *)ext,
							(void *)msk, rule);
			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
			dst = &gre_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
			msk += sizeof(struct nfp_flower_ipv6_gre_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_gre_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
			msk += sizeof(struct nfp_flower_ipv4_gre_tun);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_udp_tun((void *)ext,
							(void *)msk, rule);
			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
			dst = &udp_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
			msk += sizeof(struct nfp_flower_ipv6_udp_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_udp_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
			msk += sizeof(struct nfp_flower_ipv4_udp_tun);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP)
			nfp_flower_compile_geneve_opt(ext, msk, rule);
	}

	/* Check that the flow key does not exceed the maximum limit.
	 * All structures in the key are multiples of 4 bytes, so use u32.
	 */
	ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
	if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: flow key too long");
		return -EOPNOTSUPP;
	}

	return 0;
}