/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
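
/* Worked example (illustrative): the macro clamps the computed share to
 * [MLX5_MIN_BW_SHARE, limit]. With rate = 40000, divider = 100 and
 * limit = 100, 40000 / 100 = 400 exceeds the limit and the result is 100;
 * with rate = 50, 50 / 100 = 0 is raised to MLX5_MIN_BW_SHARE = 1.
 */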

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u8 qos;
	u64 node_guid;
	int link_state;
	u32 min_rate;
	u32 max_rate;
	bool spoofchk;
	bool trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
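
/* Usage sketch (illustrative): callers arm a vport with a mask of these
 * events, e.g. when loading a VF vport:
 *
 *	mlx5_eswitch_load_vport(esw, vport_num,
 *				MLX5_VPORT_UC_ADDR_CHANGE |
 *				MLX5_VPORT_MC_ADDR_CHANGE |
 *				MLX5_VPORT_PROMISC_CHANGE);
 */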

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	int vport;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
	} qos;

	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	struct devlink_port *dl_port;
};

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport *vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;

	struct {
		bool enabled;
		u32 root_tsar_id;
	} qos;

	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
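
/* Usage sketch (illustrative): since mode_lock protects eswitch mode
 * changes, a devlink-driven mode change would typically take it and use
 * the locked variant, e.g.:
 *
 *	down_write(&esw->mode_lock);
 *	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
 *					 MLX5_ESWITCH_IGNORE_NUM_VFS);
 *	up_write(&esw->mode_lock);
 */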
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
					   struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
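
/* Usage sketch (illustrative): a caller offloading a double (QinQ)
 * push/pop VLAN action would gate it on depth-2 support, e.g.:
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */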

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* The uplink always resides at the last element of the array. */
	return esw->total_vports - 1;
}

static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
	return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
						  u16 vport_num)
{
	if (vport_num == MLX5_VPORT_ECPF) {
		if (!mlx5_ecpf_vport_exists(esw->dev))
			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
		return mlx5_eswitch_ecpf_idx(esw);
	}

	if (vport_num == MLX5_VPORT_UPLINK)
		return mlx5_eswitch_uplink_idx(esw);

	return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
						  int index)
{
	if (index == mlx5_eswitch_ecpf_idx(esw) &&
	    mlx5_ecpf_vport_exists(esw->dev))
		return MLX5_VPORT_ECPF;

	if (index == mlx5_eswitch_uplink_idx(esw))
		return MLX5_VPORT_UPLINK;

	return index;
}
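
/* Worked example (illustrative): with total_vports = 10 on a device where
 * the ECPF vport exists, the PF and VFs map to indices 0..7 unchanged,
 * MLX5_VPORT_ECPF maps to index 8 (total_vports - 2) and MLX5_VPORT_UPLINK
 * to index 9 (total_vports - 1); mlx5_eswitch_index_to_vport_num()
 * inverts that mapping.
 */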

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
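
/* Worked example (illustrative): the devlink port index packs the vhca_id
 * into the high 16 bits and the vport number into the low 16 bits, so a
 * vhca_id of 2 and vport 5 encode to (2 << 16) | 5 = 0x20005, and
 * mlx5_esw_devlink_port_index_to_vport_num(0x20005) recovers 5.
 */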

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport) \
	for ((i) = MLX5_VPORT_PF; \
	     (vport) = &(esw)->vports[i], \
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
	for ((i) = (esw)->total_vports - 1; \
	     (vport) = &(esw)->vports[i], \
	     (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
	for ((i) = MLX5_VPORT_FIRST_VF; \
	     (vport) = &(esw)->vports[(i)], \
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
	for ((i) = (nvfs); \
	     (vport) = &(esw)->vports[(i)], \
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)
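
/* Usage sketch (illustrative): walking all VF vports after init, e.g. to
 * log their configured MACs:
 *
 *	struct mlx5_vport *vport;
 *	int i;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 *		esw_debug(esw->dev, "vport %d mac %pM\n",
 *			  vport->vport, vport->info.mac);
 */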

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep) \
	for ((i) = MLX5_VPORT_PF; \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
	for ((i) = MLX5_VPORT_FIRST_VF; \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
	for ((i) = (nvfs); \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
	for ((i) = (esw)->first_host_vport; \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
	for ((i) = (nvfs); \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
	for ((vport) = (esw)->first_host_vport; \
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
	for ((vport) = (nvfs); \
	     (vport) >= (esw)->first_host_vport; (vport)--)

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */