// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

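/* Byte offsets and masks used when parsing the raw protocol header
 * buffers supplied by the VF (see ice_vc_fdir_parse_pattern()).
 */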
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

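/* The ESP inset flags distinguish ESP-over-UDP (NAT-T) from plain IPsec
 * ESP, so that fdir_inset_map below can map the same virtchnl SPI field
 * to the matching ice flow field.
 */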
#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

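/* Each vc_pattern_* array below is a VIRTCHNL_PROTO_HDR_NONE-terminated
 * list of protocol header types describing one supported filter pattern.
 */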
static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

struct virtchnl_fdir_pattern_match_item {
	enum virtchnl_proto_hdr_type *list;
	u64 input_set;
	u64 *meta;
};

static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
	{vc_pattern_ipv4,                     0,         NULL},
	{vc_pattern_ipv4_tcp,                 0,         NULL},
	{vc_pattern_ipv4_udp,                 0,         NULL},
	{vc_pattern_ipv4_sctp,                0,         NULL},
	{vc_pattern_ipv6,                     0,         NULL},
	{vc_pattern_ipv6_tcp,                 0,         NULL},
	{vc_pattern_ipv6_udp,                 0,         NULL},
	{vc_pattern_ipv6_sctp,                0,         NULL},
};

static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
	{vc_pattern_ipv4,                     0,         NULL},
	{vc_pattern_ipv4_tcp,                 0,         NULL},
	{vc_pattern_ipv4_udp,                 0,         NULL},
	{vc_pattern_ipv4_sctp,                0,         NULL},
	{vc_pattern_ipv6,                     0,         NULL},
	{vc_pattern_ipv6_tcp,                 0,         NULL},
	{vc_pattern_ipv6_udp,                 0,         NULL},
	{vc_pattern_ipv6_sctp,                0,         NULL},
	{vc_pattern_ether,                    0,         NULL},
	{vc_pattern_ipv4_gtpu,                0,         NULL},
	{vc_pattern_ipv4_gtpu_eh,             0,         NULL},
	{vc_pattern_ipv4_l2tpv3,              0,         NULL},
	{vc_pattern_ipv6_l2tpv3,              0,         NULL},
	{vc_pattern_ipv4_esp,                 0,         NULL},
	{vc_pattern_ipv6_esp,                 0,         NULL},
	{vc_pattern_ipv4_ah,                  0,         NULL},
	{vc_pattern_ipv6_ah,                  0,         NULL},
	{vc_pattern_ipv4_nat_t_esp,           0,         NULL},
	{vc_pattern_ipv6_nat_t_esp,           0,         NULL},
	{vc_pattern_ipv4_pfcp,                0,         NULL},
	{vc_pattern_ipv6_pfcp,                0,         NULL},
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

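/* Map each virtchnl protocol header field to the corresponding ice flow
 * field. Entries with a non-zero mask apply only when the filter's
 * inset_flag matches the entry's flag (see the two ESP SPI entries).
 */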
static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for the valid VSI ID, PF's state and VF's state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (vsi_id != vf->lan_vsi_num)
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!pf->vsi[vf->lan_vsi_idx])
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the selected fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

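	/* Walk the inset map and record every field the VF selected,
	 * clearing each one from the local copy so the loop can stop as
	 * soon as all selected fields have been consumed.
	 */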
	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store them into
 * flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				flow, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

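	/* Tunnel profiles are offset by ICE_FLTR_PTYPE_MAX so that the
	 * tunneled and non-tunneled variants of the same flow type map
	 * to distinct profile IDs.
	 */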
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check whether @conf conflicts with the profiles of existing filters
 *
 * Return: true if a conflict is found, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

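		/* An L4-specific (TCP/UDP/SCTP) rule and an *_OTHER rule of
		 * the same IP version cannot coexist, since their flow
		 * profiles conflict with each other.
		 */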
		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	enum ice_status status;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				   tun + 1, &prof);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

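	/* Add two flow entries for this profile: one for the VF's own
	 * VSI and one for its control VSI, which is used for FDIR
	 * programming.
	 */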
	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				    vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry1_h);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry2_h);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_match_pattern
 * @fltr: virtual channel add cmd buffer
 * @type: virtual channel protocol filter header type
 *
 * Match the filter's protocol header sequence against the given pattern,
 * header by header.
 *
 * Return: true if the pattern matches, false otherwise.
 */
static bool
ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
			  enum virtchnl_proto_hdr_type *type)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int i = 0;

	while ((i < proto->count) &&
	       (*type == proto->proto_hdr[i].type) &&
	       (*type != VIRTCHNL_PROTO_HDR_NONE)) {
		type++;
		i++;
	}

	return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
}

/**
 * ice_vc_fdir_get_pattern - get the allowed pattern list
 * @vf: pointer to the VF info
 * @len: filter list length
 *
 * Return: pointer to allowed filter list
 */
static const struct virtchnl_fdir_pattern_match_item *
ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
{
	const struct virtchnl_fdir_pattern_match_item *item;
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;
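	/* The COMMS DDP package enables the extended pattern list (GTPU,
	 * L2TPv3, ESP, AH, PFCP, ...); the OS package only supports the
	 * basic IPv4/IPv6 L3/L4 patterns.
	 */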
	if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
		     sizeof(hw->active_pkg_name))) {
		item = vc_fdir_pattern_comms;
		*len = ARRAY_SIZE(vc_fdir_pattern_comms);
	} else {
		item = vc_fdir_pattern_os;
		*len = ARRAY_SIZE(vc_fdir_pattern_os);
	}

	return item;
}

/**
 * ice_vc_fdir_search_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 *
 * Search for matched pattern from supported pattern list
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
{
	const struct virtchnl_fdir_pattern_match_item *pattern;
	int len, i;

	pattern = ice_vc_fdir_get_pattern(vf, &len);

	for (i = 0; i < len; i++)
		if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
			return 0;

	return -EINVAL;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}
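
	/* l3/l4 track the most recent L3/L4 header type seen, so later
	 * headers (TCP, UDP, ESP, PFCP, ...) can derive the final flow
	 * type from the header sequence.
	 */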
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	int ret;

	ret = ice_vc_fdir_search_pattern(vf, fltr);
	if (ret)
		return ret;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules have the same value, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the FDIR conf entry on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	enum ice_status status;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}
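
	/* Build a filter programming descriptor and a dummy training
	 * packet, then hand both to the control VSI to program the
	 * filter into hardware.
	 */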
	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
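	/* Hand the timed-out in-flight context (ctx_irq) over to
	 * ctx_done with a TIMEOUT status; the service task picks it up
	 * for post processing via ICE_FD_VF_FLUSH_CTX.
	 */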
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	vf = &pf->vf[ctrl_vsi->vf_id];

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

1691 /**
1692  * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1693  * @vf: pointer to the VF info
1694  */
ice_vf_fdir_dump_info(struct ice_vf * vf)1695 static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}
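
/* VSIQF_FD_SIZE reports how many flow director entries the VSI was allocated
 * and VSIQF_FD_CNT how many it has consumed, each split into a guaranteed
 * (guar/GCNT) and a best-effort (be/BCNT) pool, so this dump shows at a
 * glance whether a programming failure was caused by filter-table exhaustion.
 */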

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d: Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d: Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}
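
/* The checks above walk the programming status writeback in order: DD
 * confirms the descriptor completed, PROG_ID must match the pending virtchnl
 * opcode (add vs. del), FAIL reports a table-level failure (no space on add,
 * missing entry on del), and FAIL_PROF reports a flow profile matching
 * failure. Any failure is translated into a virtchnl_fdir_prgm_status for
 * the VF, and the FD space counters are dumped for diagnosis.
 */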

/**
 * ice_vc_add_fdir_fltr_post - post process FDIR filter add command
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director add command. On success, do post
 * processing and send back a success message by virtchnl. Otherwise, revert
 * the context and send back a failure message by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}
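
/* Note the asymmetric cleanup: on a successful add the conf stays owned by
 * the VF's FDIR list and only the response buffer is freed, whereas on
 * failure both the list entry and the devm-allocated conf are released
 * before the failure status is reported back to the VF.
 */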

/**
 * ice_vc_del_fdir_fltr_post - post process FDIR filter delete command
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director delete command. On success, do post
 * processing and send back a success message by virtchnl. Otherwise, revert
 * the context and send back a failure message by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx - flush pending FDIR completion events
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	int i;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf *vf = &pf->vf[i];
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
}
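
/* ice_flush_fdir_ctx() runs from the PF service task: both the IRQ handler
 * and the timeout handler merely set ICE_FD_VF_FLUSH_CTX and schedule the
 * task, so the virtchnl reply to the VF is always sent from process context
 * rather than from the interrupt or timer path.
 */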

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}
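
/* Only one FDIR request per VF can be in flight: -EBUSY is returned while
 * either ctx_irq or ctx_done is still marked valid. The 10 ms timer armed
 * here is the upper bound on how long the PF waits for the programming
 * writeback before ice_vf_fdir_timer() completes the request with a
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT status.
 */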

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add an FDIR filter for VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
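
/* For reference, the VF side (e.g. the iavf driver) reaches this handler by
 * sending a VIRTCHNL_OP_ADD_FDIR_FILTER message. A minimal sketch, assuming
 * the virtchnl_fdir_add layout from virtchnl.h and matching the vc_pattern
 * arrays above (field selectors and header values omitted for brevity):
 *
 *	struct virtchnl_fdir_add req = {};
 *
 *	req.vsi_id = vsi_id;		// VF's VSI id on the virtchnl side
 *	req.validate_only = 0;		// 1 = validate only, don't program
 *	req.rule_cfg.proto_hdrs.count = 3;	// ETH / IPV4 / TCP pattern
 *	req.rule_cfg.proto_hdrs.proto_hdr[0].type = VIRTCHNL_PROTO_HDR_ETH;
 *	req.rule_cfg.proto_hdrs.proto_hdr[1].type = VIRTCHNL_PROTO_HDR_IPV4;
 *	req.rule_cfg.proto_hdrs.proto_hdr[2].type = VIRTCHNL_PROTO_HDR_TCP;
 *	req.rule_cfg.action_set.count = 1;	// steer matches to queue 1
 *	req.rule_cfg.action_set.actions[0].type = VIRTCHNL_ACTION_QUEUE;
 *	req.rule_cfg.action_set.actions[0].act_conf.queue.index = 1;
 *
 * On success the PF fills in flow_id and a VIRTCHNL_FDIR_SUCCESS status in
 * the reply; that flow_id is what a later struct virtchnl_fdir_del must
 * carry to remove the rule.
 */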

/**
 * ice_vc_del_fdir_fltr - delete an FDIR filter for VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
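
/* The delete path mirrors the add path: the rule is looked up by the flow_id
 * the VF received from the add, a context is armed, and the same programming
 * routine is invoked with add == false. The software list entry and conf are
 * only released in ice_vc_del_fdir_fltr_post() once hardware confirms the
 * removal.
 */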

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}
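
/* fdir_rule_idr hands out the flow_id values that are returned to the VF on
 * add and looked up again on delete, fdir_rule_list keeps the conf entries
 * for duplicate detection and teardown, and ctx_lock serializes the
 * ctx_irq/ctx_done handoff used throughout this file.
 */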

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}