/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_TC_H__
#define __MLX5_EN_TC_H__

#include <net/pkt_cls.h>
#include "en.h"
#include "eswitch.h"
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
#include "en/tc/meter.h"
#include "en_rep.h"

#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff

#ifdef CONFIG_MLX5_ESWITCH

#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
                          sizeof(struct mlx5_nic_flow_attr))
#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
                          sizeof(struct mlx5_esw_flow_attr))
#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
                            ESW_FLOW_ATTR_SZ :\
                            NIC_FLOW_ATTR_SZ)
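
/* The two sizes above cover the base struct mlx5_flow_attr plus the trailing
 * per-namespace union member, so ns_to_attr_sz(MLX5_FLOW_NAMESPACE_FDB)
 * evaluates to ESW_FLOW_ATTR_SZ and any other namespace to NIC_FLOW_ATTR_SZ.
 * Flow attr allocations (see mlx5_alloc_flow_attr() further down) are
 * expected to use these sizes.
 */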

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);

struct mlx5e_tc_update_priv {
        struct net_device *fwd_dev;
};

struct mlx5_nic_flow_attr {
        u32 flow_tag;
        u32 hairpin_tirn;
        struct mlx5_flow_table *hairpin_ft;
};

struct mlx5_flow_attr {
        u32 action;
        struct mlx5_fc *counter;
        struct mlx5_modify_hdr *modify_hdr;
        struct mlx5_ct_attr ct_attr;
        struct mlx5e_sample_attr sample_attr;
        struct mlx5e_meter_attr meter_attr;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        u32 chain;
        u16 prio;
        u32 dest_chain;
        struct mlx5_flow_table *ft;
        struct mlx5_flow_table *dest_ft;
        u8 inner_match_level;
        u8 outer_match_level;
        u8 tun_ip_version;
        int tunnel_id; /* mapped tunnel id */
        u32 flags;
        u32 exe_aso_type;
        struct list_head list;
        struct mlx5e_post_act_handle *post_act_handle;
        struct {
                /* Indicate whether the parsed flow should be counted for lag mode decision
                 * making
                 */
                bool count;
        } lag;
        /* keep this union last */
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
                struct mlx5_nic_flow_attr nic_attr[0];
        };
};

enum {
        MLX5_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
        MLX5_ATTR_FLAG_SLOW_PATH     = BIT(1),
        MLX5_ATTR_FLAG_NO_IN_PORT    = BIT(2),
        MLX5_ATTR_FLAG_SRC_REWRITE   = BIT(3),
        MLX5_ATTR_FLAG_SAMPLE        = BIT(4),
        MLX5_ATTR_FLAG_ACCEPT        = BIT(5),
        MLX5_ATTR_FLAG_CT            = BIT(6),
};

/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
static inline bool
mlx5e_tc_attr_flags_skip(u32 attr_flags)
{
        return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
}

struct mlx5_rx_tun_attr {
        u16 decap_vport;
        union {
                __be32 v4;
                struct in6_addr v6;
        } src_ip; /* Valid if decap_vport is not zero */
        union {
                __be32 v4;
                struct in6_addr v6;
        } dst_ip; /* Valid if decap_vport is not zero */
};

#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)

#define MLX5E_TC_MAX_INT_PORT_NUM (8)

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)

struct tunnel_match_key {
        struct flow_dissector_key_control enc_control;
        struct flow_dissector_key_keyid enc_key_id;
        struct flow_dissector_key_ports enc_tp;
        struct flow_dissector_key_ip enc_ip;
        union {
                struct flow_dissector_key_ipv4_addrs enc_ipv4;
                struct flow_dissector_key_ipv6_addrs enc_ipv6;
        };

        int filter_ifindex;
};

struct tunnel_match_enc_opts {
        struct flow_dissector_key_enc_opts key;
        struct flow_dissector_key_enc_opts mask;
};

/* A mapped tunnel id is TUNNEL_INFO_BITS + ENC_OPTS_BITS wide:
 * the upper TUNNEL_INFO_BITS carry the general tunnel info id,
 * the lower ENC_OPTS_BITS carry the enc_opts id.
 */
#define TUNNEL_INFO_BITS 12
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
#define ENC_OPTS_BITS 11
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
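
/* Illustrative helpers (hypothetical, not part of the driver): assuming a
 * mapped tunnel id packs the tunnel info id in the upper TUNNEL_INFO_BITS
 * and the enc_opts id in the lower ENC_OPTS_BITS, the two parts can be
 * recovered with the masks above.
 */
static inline u32 mlx5e_tc_tun_id_to_enc_opts_id(u32 tun_id)
{
        return tun_id & ENC_OPTS_BITS_MASK;
}

static inline u32 mlx5e_tc_tun_id_to_tun_info_id(u32 tun_id)
{
        return (tun_id >> ENC_OPTS_BITS) & TUNNEL_INFO_BITS_MASK;
}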

enum {
        MLX5E_TC_FLAG_INGRESS_BIT,
        MLX5E_TC_FLAG_EGRESS_BIT,
        MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
        MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
        MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
        MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};

#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
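
/* For example, MLX5_TC_FLAG(ESW_OFFLOAD) expands to
 * BIT(MLX5E_TC_FLAG_ESW_OFFLOAD_BIT), so callers typically test
 * "flags & MLX5_TC_FLAG(ESW_OFFLOAD)".
 */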

int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);

int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
                           struct flow_cls_offload *f, unsigned long flags);
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
                        struct flow_cls_offload *f, unsigned long flags);

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
                       struct flow_cls_offload *f, unsigned long flags);

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
                                struct tc_cls_matchall_offload *f);
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
                             struct tc_cls_matchall_offload *f);
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
                             struct tc_cls_matchall_offload *ma);

struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e,
                              struct list_head *flow_list);
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e,
                              struct list_head *flow_list);
bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);

void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);

struct mlx5e_neigh_hash_entry;
struct mlx5e_encap_entry *
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
                          struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);

void mlx5e_tc_reoffload_flows_work(struct work_struct *work);

enum mlx5e_tc_attr_to_reg {
        CHAIN_TO_REG,
        VPORT_TO_REG,
        TUNNEL_TO_REG,
        CTSTATE_TO_REG,
        ZONE_TO_REG,
        ZONE_RESTORE_TO_REG,
        MARK_TO_REG,
        LABELS_TO_REG,
        FTEID_TO_REG,
        NIC_CHAIN_TO_REG,
        NIC_ZONE_RESTORE_TO_REG,
        PACKET_COLOR_TO_REG,
};

struct mlx5e_tc_attr_to_reg_mapping {
        int mfield; /* rewrite field */
        int moffset; /* bit offset of mfield */
        int mlen; /* bits to rewrite/match */

        int soffset; /* byte offset of spec for match */
};

extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];

#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))

bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
                                    struct net_device *out_dev);

int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
                              struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
                              enum mlx5_flow_namespace_type ns,
                              enum mlx5e_tc_attr_to_reg type,
                              u32 data);

void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
                                          struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
                                          enum mlx5e_tc_attr_to_reg type,
                                          int act_id, u32 data);

void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
                                 enum mlx5e_tc_attr_to_reg type,
                                 u32 data,
                                 u32 mask);

void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
                                     enum mlx5e_tc_attr_to_reg type,
                                     u32 *data,
                                     u32 *mask);

int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
                                         struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
                                         enum mlx5_flow_namespace_type ns,
                                         enum mlx5e_tc_attr_to_reg type,
                                         u32 data);
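
/* A minimal usage sketch (this helper is hypothetical, not part of the driver
 * API): write a value into the register mapped for the chain id with a
 * modify-header action on the FDB path, then match the same register back on
 * a later lookup, assuming that is the intended pairing of the two calls
 * above.
 */
static inline int
mlx5e_tc_example_set_and_match_chain(struct mlx5_core_dev *mdev,
                                     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
                                     struct mlx5_flow_spec *spec,
                                     u32 chain)
{
        int err;

        /* Rewrite the chain register via a modify-header action. */
        err = mlx5e_tc_match_to_reg_set(mdev, mod_hdr_acts,
                                        MLX5_FLOW_NAMESPACE_FDB,
                                        CHAIN_TO_REG, chain);
        if (err)
                return err;

        /* Match the restored chain id using the register's full mask. */
        mlx5e_tc_match_to_reg_match(spec, CHAIN_TO_REG, chain,
                                    MLX5_REG_MAPPING_MASK(CHAIN_TO_REG));
        return 0;
}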

int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct mlx5_flow_attr *attr);

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
                            struct flow_match_basic *match, bool outer,
                            void *headers_c, void *headers_v);

int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                            void *cb_priv);

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct mlx5_flow_attr *attr);
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
                                  struct mlx5_flow_handle *rule,
                                  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
                    struct mlx5_flow_spec *spec,
                    struct mlx5_flow_attr *attr);
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
                    struct mlx5_flow_handle *rule,
                    struct mlx5_flow_attr *attr);

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
                               u16 *vport);

int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
                                      struct mlx5_flow_attr *attr,
                                      int ifindex,
                                      enum mlx5e_tc_int_port_type type,
                                      u32 *action,
                                      int out_index);
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }

#endif /* CONFIG_MLX5_CLS_ACT */

struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
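
/* A minimal usage sketch (illustrative; assumes the allocator reserves
 * ns_to_attr_sz(type) bytes so the trailing union storage is valid):
 *
 *      attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
 *      if (!attr)
 *              return -ENOMEM;
 *      ... attr->esw_attr is then backed by the trailing mlx5_esw_flow_attr ...
 */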

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct mlx5_flow_attr *attr);
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
                                  struct mlx5_flow_handle *rule,
                                  struct mlx5_flow_attr *attr);

#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
                                       unsigned long flags)
{
        return 0;
}

static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
#endif

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
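/* True when the CQE's ft_metadata (reg_b) carries a non-zero chain tag in its
 * lower MLX5E_TC_TABLE_CHAIN_TAG_BITS bits and no bits above the chain tag
 * and zone-restore fields are set; the RX path uses this to decide whether
 * mlx5e_tc_update_skb() below needs to run.
 */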
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        u32 chain, reg_b;

        reg_b = be32_to_cpu(cqe->ft_metadata);

        if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
                return false;

        chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
        if (chain)
                return true;
#endif

        return false;
}

bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
#endif

#endif /* __MLX5_EN_TC_H__ */