/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

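/* Ethtool receive flow classification (ETHTOOL_SRXCLSRLINS and friends) and
 * RSS hash option support for mlx5e.
 *
 * As a rough usage sketch (device name and values are illustrative only),
 * a command such as
 *
 *   ethtool -U eth0 flow-type tcp4 dst-ip 192.0.2.1 dst-port 80 action 2 loc 1
 *
 * arrives here via ETHTOOL_SRXCLSRLINS and lands in
 * mlx5e_ethtool_flow_replace(), steering matching IPv4/TCP packets to RX
 * queue 2; "action -1" (RX_CLS_FLOW_DISC) would drop them instead.
 */
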
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"

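/* One ethtool steering rule: the user-visible flow spec, the installed
 * hardware rule handle, and the flow table holding it.
 */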
struct mlx5e_ethtool_rule {
	struct list_head             list;
	struct ethtool_rx_flow_spec  flow_spec;
	struct mlx5_flow_handle	     *rule;
	struct mlx5e_ethtool_table   *eth_ft;
};

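/* Drop one rule's reference to the table; free the hardware flow table
 * when its last rule goes away.
 */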
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS  10
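
/* Find (creating on first use) the flow table for this flow type and tuple
 * count. Rules with more match tuples get a lower priority number, i.e.
 * higher precedence, so more specific rules match first.
 */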
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
		break;
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &priv->fs.ethtool.l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);

	ft_attr.prio = prio;
	ft_attr.max_fte = table_size;
	ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return (void *)ft;

	eth_ft->ft = ft;
	return eth_ft;
}

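/* Apply @mask to @val in place, byte by byte, so that bits the user did not
 * mask never make it into the match value.
 */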
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*val = *mask & *val;
}

#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val  = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val  = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val  = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val  = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

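/* Translate an ethtool flow spec into mlx5 match criteria (masks) and match
 * values inside the outer_headers of an FTE match parameter.
 */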
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

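/* Insert the rule so the list stays sorted by ascending location. */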
static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_rule *iter;
	struct list_head *head = &priv->fs.ethtool.rules;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	priv->fs.ethtool.tot_num_rules++;
	list_add(&rule->list, head);
}

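/* True if every outer-header match criteria byte is zero, i.e. the spec
 * matches all packets and needs no criteria enabled.
 */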
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

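/* Build the match spec and install the hardware rule: either drop the flow
 * (RX_CLS_FLOW_DISC) or forward it to the TIR of the RX queue encoded in
 * the ring cookie.
 */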
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value,
			     fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		struct mlx5e_params *params = &priv->channels.params;
		enum mlx5e_rq_group group;
		struct mlx5e_tir *tir;
		u16 ix;

		mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
		tir = group == MLX5E_RQ_GROUP_XSK ? priv->xsk_tir : priv->direct_tir;

		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dst->tir_num = tir[ix].tirn;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

static void del_ethtool_rule(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	list_del(&eth_rule->list);
	priv->fs.ethtool.tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

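/* Allocate a fresh rule entry for @location, deleting any rule previously
 * installed there.
 */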
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

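/* Sanity-check the flow spec and count its match tuples; returns the tuple
 * count on success or a negative errno.
 */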
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
					fs->ring_cookie))
			return -EINVAL;

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -ENOTSUPP;
	}
	if ((fs->flow_type & FLOW_EXT)) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

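/* ETHTOOL_SRXCLSRLINS: install, or replace in place, the classification
 * rule at fs->location.
 */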
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;
	if (!eth_ft->ft) {
		err = -EINVAL;
		goto del_ethtool_rule;
	}
	rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
		if (eth_rule->flow_spec.location == location) {
			info->fs = eth_rule->flow_spec;
			return 0;
		}
	}

	return -ENOENT;
}

static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, info, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
		del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}

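/* Map an ethtool flow type onto the traffic type indexing the indirect
 * TIRs; MLX5E_NUM_INDIR_TIRS means there is no matching traffic type.
 */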
static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5E_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5E_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5E_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5E_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5E_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5E_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5E_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5E_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5E_TT_IPV4;
	case IPV6_FLOW:
		return MLX5E_TT_IPV6;
	default:
		return MLX5E_NUM_INDIR_TIRS;
	}
}

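/* ETHTOOL_SRXFH: choose which packet fields feed the RSS hash for one
 * traffic type, and update the TIRs if the selection changed.
 */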
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	enum mlx5e_traffic_types tt;
	u8 rx_hash_field = 0;
	void *in;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt == MLX5E_NUM_INDIR_TIRS)
		return -EINVAL;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mutex_lock(&priv->state_lock);

	if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
		goto out;

	priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
	mlx5e_modify_tirs_hash(priv, in);

out:
	mutex_unlock(&priv->state_lock);
	kvfree(in);
	return 0;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	enum mlx5e_traffic_types tt;
	u32 hash_field = 0;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt == MLX5E_NUM_INDIR_TIRS)
		return -EINVAL;

	hash_field = priv->rss_params.rx_hash_fields[tt];
	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}