/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/umem.h"

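/* One list entry per ethtool steering rule: the user-supplied flow
 * spec, the installed hardware rule handle and the flow table that
 * holds it.
 */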
struct mlx5e_ethtool_rule {
	struct list_head             list;
	struct ethtool_rx_flow_spec  flow_spec;
	struct mlx5_flow_handle      *rule;
	struct mlx5e_ethtool_table   *eth_ft;
};

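/* Drop one rule reference; the flow table is destroyed when its last
 * rule goes away.
 */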
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS  10
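
/* Pick (and create on first use) the flow table for this flow spec.
 * Rules that match more tuples are given a numerically lower, i.e.
 * higher-precedence, priority.
 */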
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &priv->fs.ethtool.l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);
	ft = mlx5_create_auto_grouped_flow_table(ns, prio,
						 table_size,
						 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
	if (IS_ERR(ft))
		return (void *)ft;

	eth_ft->ft = ft;
	return eth_ft;
}

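/* Clear value bits that the mask does not cover, byte by byte. */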
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val = *mask & *val;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

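/* The set_*() helpers below fill an outer-header match criteria/value
 * pair; a zero mask leaves the corresponding field unmatched.
 */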
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

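/* Match the masked L4 ports and pin ip_protocol to TCP. */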
static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val  = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val  = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

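/* Translate an ethtool_rx_flow_spec into mlx5 match criteria/values,
 * including the FLOW_EXT VLAN ID and FLOW_MAC_EXT destination MAC
 * extensions.
 */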
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

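/* Keep the rule list sorted by location so dumps walk the rules in
 * ascending order.
 */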
static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_rule *iter;
	struct list_head *head = &priv->fs.ethtool.rules;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	priv->fs.ethtool.tot_num_rules++;
	list_add(&rule->list, head);
}

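/* True if no outer-header match criteria bit is set. */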
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

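/* Build the flow spec and install it in @ft. RX_CLS_FLOW_DISC maps to
 * a drop action; any other ring_cookie forwards to the TIR of the
 * selected channel (regular or XSK RQ group).
 */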
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value,
			     fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		struct mlx5e_params *params = &priv->channels.params;
		enum mlx5e_rq_group group;
		struct mlx5e_tir *tir;
		u16 ix;

		mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
		tir = group == MLX5E_RQ_GROUP_XSK ? priv->xsk_tir : priv->direct_tir;

		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dst->tir_num = tir[ix].tirn;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

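/* Remove the hardware rule (if it was installed), unlink the entry
 * and release its flow table reference.
 */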
static void del_ethtool_rule(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	list_del(&eth_rule->list);
	priv->fs.ethtool.tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

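/* Allocate a list entry for @location, replacing any rule already
 * installed there.
 */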
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

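/* Check the flow spec and return the number of match tuples it uses
 * (always > 0 for a valid spec), or a negative errno. The tuple count
 * later selects the flow table priority in get_flow_table().
 */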
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
					fs->ring_cookie))
			return -EINVAL;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -ENOTSUPP;
	}
	if ((fs->flow_type & FLOW_EXT)) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

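/* ETHTOOL_SRXCLSRLINS: insert or replace the rule at fs->location. */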
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;
	if (!eth_ft->ft) {
		err = -EINVAL;
		goto del_ethtool_rule;
	}
	rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
		if (eth_rule->flow_spec.location == location) {
			info->fs = eth_rule->flow_spec;
			return 0;
		}
	}

	return -ENOENT;
}

static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, info, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
		del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}

static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5E_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5E_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5E_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5E_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5E_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5E_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5E_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5E_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5E_TT_IPV4;
	case IPV6_FLOW:
		return MLX5E_TT_IPV6;
	default:
		return MLX5E_NUM_INDIR_TIRS;
	}
}

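/* ETHTOOL_SRXFH: update the RX hash fields of one traffic type. Only
 * IP src/dst and L4 src/dst port selections are supported, and only
 * for TCP/UDP over IPv4/IPv6.
 */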
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	enum mlx5e_traffic_types tt;
	u8 rx_hash_field = 0;
	void *in;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt == MLX5E_NUM_INDIR_TIRS)
		return -EINVAL;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mutex_lock(&priv->state_lock);

	if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
		goto out;

	priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
	mlx5e_modify_tirs_hash(priv, in, inlen);

out:
	mutex_unlock(&priv->state_lock);
	kvfree(in);
	return 0;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	enum mlx5e_traffic_types tt;
	u32 hash_field = 0;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt == MLX5E_NUM_INDIR_TIRS)
		return -EINVAL;

	hash_field = priv->rss_params.rx_hash_fields[tt];
	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

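/* Entry points for the ethtool RX NFC commands. An illustrative
 * invocation (exact syntax per ethtool(8)):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 0
 *
 * reaches mlx5e_ethtool_flow_replace() via ETHTOOL_SRXCLSRLINS.
 */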
int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}