1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40
41 enum {
42 FDB_FAST_PATH = 0,
43 FDB_SLOW_PATH
44 };
45
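/* Add an offloaded fast path FDB rule: match on the in_rep source vport in
 * addition to the caller's spec, and forward to the out_rep vport and/or a
 * flow counter. Per-flow vlan push/pop is emulated elsewhere, so those action
 * bits are masked out before the rule is programmed.
 */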
46 struct mlx5_flow_handle *
47 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
48 struct mlx5_flow_spec *spec,
49 struct mlx5_esw_flow_attr *attr)
50 {
51 struct mlx5_flow_destination dest[2] = {};
52 struct mlx5_flow_act flow_act = {0};
53 struct mlx5_fc *counter = NULL;
54 struct mlx5_flow_handle *rule;
55 void *misc;
56 int i = 0;
57
58 if (esw->mode != SRIOV_OFFLOADS)
59 return ERR_PTR(-EOPNOTSUPP);
60
61 /* per flow vlan pop/push is emulated, don't set that into the firmware */
62 flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
63
64 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
65 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
66 dest[i].vport_num = attr->out_rep->vport;
67 i++;
68 }
69 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
70 counter = mlx5_fc_create(esw->dev, true);
71 if (IS_ERR(counter)) {
72 rule = ERR_CAST(counter);
73 goto err_counter_alloc;
74 }
75 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
76 dest[i].counter = counter;
77 i++;
78 }
79
80 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
81 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
82
83 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
84 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
85
86 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
87 MLX5_MATCH_MISC_PARAMETERS;
88 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
89 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
90
91 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
92 flow_act.modify_id = attr->mod_hdr_id;
93
94 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
95 flow_act.encap_id = attr->encap_id;
96
97 rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
98 spec, &flow_act, dest, i);
99 if (IS_ERR(rule))
100 goto err_add_rule;
101 else
102 esw->offloads.num_flows++;
103
104 return rule;
105
106 err_add_rule:
107 mlx5_fc_destroy(esw->dev, counter);
108 err_counter_alloc:
109 return rule;
110 }
111
112 void
113 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
114 struct mlx5_flow_handle *rule,
115 struct mlx5_esw_flow_attr *attr)
116 {
117 struct mlx5_fc *counter = NULL;
118
119 counter = mlx5_flow_rule_counter(rule);
120 mlx5_del_flow_rules(rule);
121 mlx5_fc_destroy(esw->dev, counter);
122 esw->offloads.num_flows--;
123 }
124
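/* Apply a global vlan policy (strip when val is set, none otherwise) on every
 * valid VF representor vport.
 */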
125 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
126 {
127 struct mlx5_eswitch_rep *rep;
128 int vf_vport, err = 0;
129
130 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
131 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
132 rep = &esw->offloads.vport_reps[vf_vport];
133 if (!rep->valid)
134 continue;
135
136 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
137 if (err)
138 goto out;
139 }
140
141 out:
142 return err;
143 }
144
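/* Pick the vport whose vlan configuration an emulated action touches:
 * push applies on the ingress (in_rep) vport, pop on the egress (out_rep)
 * vport.
 */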
145 static struct mlx5_eswitch_rep *
146 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
147 {
148 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
149
150 in_rep = attr->in_rep;
151 out_rep = attr->out_rep;
152
153 if (push)
154 vport = in_rep;
155 else if (pop)
156 vport = out_rep;
157 else
158 vport = in_rep;
159
160 return vport;
161 }
162
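/* Refuse vlan push/pop combinations that can't be emulated: push/pop without
 * forwarding, push on the uplink, pop towards the uplink, VF to wire
 * forwarding while the VF vport already has vlan push configured, and pushing
 * a vlan different from the one already refcounted on the vport.
 */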
163 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
164 bool push, bool pop, bool fwd)
165 {
166 struct mlx5_eswitch_rep *in_rep, *out_rep;
167
168 if ((push || pop) && !fwd)
169 goto out_notsupp;
170
171 in_rep = attr->in_rep;
172 out_rep = attr->out_rep;
173
174 if (push && in_rep->vport == FDB_UPLINK_VPORT)
175 goto out_notsupp;
176
177 if (pop && out_rep->vport == FDB_UPLINK_VPORT)
178 goto out_notsupp;
179
180 /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
181 if (!push && !pop && fwd)
182 if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
183 goto out_notsupp;
184
185 /* protects against (1) setting rules with different vlans to push and
186 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
187 */
188 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
189 goto out_notsupp;
190
191 return 0;
192
193 out_notsupp:
194 return -EOPNOTSUPP;
195 }
196
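/* Emulate per-flow vlan push/pop through vport vlan configuration: the first
 * push/pop rule turns on the global vlan strip policy, and push rules program
 * and refcount the vlan on the ingress vport. VF to wire rules without push
 * are refcounted as well so esw_add_vlan_action_check() stays consistent.
 */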
197 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
198 struct mlx5_esw_flow_attr *attr)
199 {
200 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
201 struct mlx5_eswitch_rep *vport = NULL;
202 bool push, pop, fwd;
203 int err = 0;
204
205 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
206 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
207 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
208
209 err = esw_add_vlan_action_check(attr, push, pop, fwd);
210 if (err)
211 return err;
212
213 attr->vlan_handled = false;
214
215 vport = esw_vlan_action_get_vport(attr, push, pop);
216
217 if (!push && !pop && fwd) {
218 /* tracks VF --> wire rules without vlan push action */
219 if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
220 vport->vlan_refcount++;
221 attr->vlan_handled = true;
222 }
223
224 return 0;
225 }
226
227 if (!push && !pop)
228 return 0;
229
230 if (!(offloads->vlan_push_pop_refcount)) {
231 /* it's the 1st vlan rule, apply global vlan pop policy */
232 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
233 if (err)
234 goto out;
235 }
236 offloads->vlan_push_pop_refcount++;
237
238 if (push) {
239 if (vport->vlan_refcount)
240 goto skip_set_push;
241
242 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
243 SET_VLAN_INSERT | SET_VLAN_STRIP);
244 if (err)
245 goto out;
246 vport->vlan = attr->vlan;
247 skip_set_push:
248 vport->vlan_refcount++;
249 }
250 out:
251 if (!err)
252 attr->vlan_handled = true;
253 return err;
254 }
255
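/* Undo mlx5_eswitch_add_vlan_action(): drop the vlan refcounts, clear the
 * vport vlan once the last push rule is gone, and disable the global vlan
 * strip policy when no push/pop rules remain.
 */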
256 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
257 struct mlx5_esw_flow_attr *attr)
258 {
259 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
260 struct mlx5_eswitch_rep *vport = NULL;
261 bool push, pop, fwd;
262 int err = 0;
263
264 if (!attr->vlan_handled)
265 return 0;
266
267 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
268 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
269 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
270
271 vport = esw_vlan_action_get_vport(attr, push, pop);
272
273 if (!push && !pop && fwd) {
274 /* tracks VF --> wire rules without vlan push action */
275 if (attr->out_rep->vport == FDB_UPLINK_VPORT)
276 vport->vlan_refcount--;
277
278 return 0;
279 }
280
281 if (push) {
282 vport->vlan_refcount--;
283 if (vport->vlan_refcount)
284 goto skip_unset_push;
285
286 vport->vlan = 0;
287 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
288 0, 0, SET_VLAN_STRIP);
289 if (err)
290 goto out;
291 }
292
293 skip_unset_push:
294 offloads->vlan_push_pop_refcount--;
295 if (offloads->vlan_push_pop_refcount)
296 return 0;
297
298 /* no more vlan rules, stop global vlan pop policy */
299 err = esw_set_global_vlan_pop(esw, 0);
300
301 out:
302 return err;
303 }
304
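/* Slow path rule that matches traffic sent from the given SQ (source sqn,
 * source vport 0) and re-injects it to the representor's vport.
 */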
305 static struct mlx5_flow_handle *
306 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
307 {
308 struct mlx5_flow_act flow_act = {0};
309 struct mlx5_flow_destination dest;
310 struct mlx5_flow_handle *flow_rule;
311 struct mlx5_flow_spec *spec;
312 void *misc;
313
314 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
315 if (!spec) {
316 flow_rule = ERR_PTR(-ENOMEM);
317 goto out;
318 }
319
320 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
321 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
322 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
323
324 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
325 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
326 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
327
328 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
329 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
330 dest.vport_num = vport;
331 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
332
333 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
334 &flow_act, &dest, 1);
335 if (IS_ERR(flow_rule))
336 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
337 out:
338 kvfree(spec);
339 return flow_rule;
340 }
341
342 void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
343 struct mlx5_eswitch_rep *rep)
344 {
345 struct mlx5_esw_sq *esw_sq, *tmp;
346
347 if (esw->mode != SRIOV_OFFLOADS)
348 return;
349
350 list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
351 mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
352 list_del(&esw_sq->list);
353 kfree(esw_sq);
354 }
355 }
356
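/* Install a send-to-vport rule for each of the representor's SQs; on failure,
 * mlx5_eswitch_sqs2vport_stop() removes whatever was already added.
 */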
357 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
358 struct mlx5_eswitch_rep *rep,
359 u16 *sqns_array, int sqns_num)
360 {
361 struct mlx5_flow_handle *flow_rule;
362 struct mlx5_esw_sq *esw_sq;
363 int err;
364 int i;
365
366 if (esw->mode != SRIOV_OFFLOADS)
367 return 0;
368
369 for (i = 0; i < sqns_num; i++) {
370 esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
371 if (!esw_sq) {
372 err = -ENOMEM;
373 goto out_err;
374 }
375
376 /* Add re-inject rule to the PF/representor sqs */
377 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
378 rep->vport,
379 sqns_array[i]);
380 if (IS_ERR(flow_rule)) {
381 err = PTR_ERR(flow_rule);
382 kfree(esw_sq);
383 goto out_err;
384 }
385 esw_sq->send_to_vport_rule = flow_rule;
386 list_add(&esw_sq->list, &rep->vport_sqs_list);
387 }
388 return 0;
389
390 out_err:
391 mlx5_eswitch_sqs2vport_stop(esw, rep);
392 return err;
393 }
394
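/* Catch-all rule in the slow path FDB: packets that match nothing else are
 * forwarded to vport 0.
 */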
395 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
396 {
397 struct mlx5_flow_act flow_act = {0};
398 struct mlx5_flow_destination dest;
399 struct mlx5_flow_handle *flow_rule = NULL;
400 struct mlx5_flow_spec *spec;
401 int err = 0;
402
403 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
404 if (!spec) {
405 err = -ENOMEM;
406 goto out;
407 }
408
409 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
410 dest.vport_num = 0;
411 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
412
413 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
414 &flow_act, &dest, 1);
415 if (IS_ERR(flow_rule)) {
416 err = PTR_ERR(flow_rule);
417 esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
418 goto out;
419 }
420
421 esw->fdb_table.offloads.miss_rule = flow_rule;
422 out:
423 kvfree(spec);
424 return err;
425 }
426
427 #define ESW_OFFLOADS_NUM_GROUPS 4
428
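/* The fast path FDB table is auto-grouped. Its size is the smaller of the
 * device's maximum table size and max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
 * and tunnel (encap/decap) support is enabled when an encap mode is set.
 */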
429 static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
430 {
431 struct mlx5_core_dev *dev = esw->dev;
432 struct mlx5_flow_namespace *root_ns;
433 struct mlx5_flow_table *fdb = NULL;
434 int esw_size, err = 0;
435 u32 flags = 0;
436 u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
437 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
438
439 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
440 if (!root_ns) {
441 esw_warn(dev, "Failed to get FDB flow namespace\n");
442 err = -EOPNOTSUPP;
443 goto out;
444 }
445
446 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
447 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
448 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);
449
450 esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
451 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
452
453 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
454 flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
455
456 fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
457 esw_size,
458 ESW_OFFLOADS_NUM_GROUPS, 0,
459 flags);
460 if (IS_ERR(fdb)) {
461 err = PTR_ERR(fdb);
462 esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
463 goto out;
464 }
465 esw->fdb_table.fdb = fdb;
466
467 out:
468 return err;
469 }
470
471 static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
472 {
473 mlx5_destroy_flow_table(esw->fdb_table.fdb);
474 }
475
476 #define MAX_PF_SQ 256
477
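/* Create both FDB tables: the auto-grouped fast path table, and the slow path
 * table that holds the send-to-vport flow group plus the miss group with its
 * catch-all rule.
 */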
478 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
479 {
480 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
481 struct mlx5_flow_table_attr ft_attr = {};
482 struct mlx5_core_dev *dev = esw->dev;
483 struct mlx5_flow_namespace *root_ns;
484 struct mlx5_flow_table *fdb = NULL;
485 int table_size, ix, err = 0;
486 struct mlx5_flow_group *g;
487 void *match_criteria;
488 u32 *flow_group_in;
489
490 esw_debug(esw->dev, "Create offloads FDB Tables\n");
491 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
492 if (!flow_group_in)
493 return -ENOMEM;
494
495 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
496 if (!root_ns) {
497 esw_warn(dev, "Failed to get FDB flow namespace\n");
498 err = -EOPNOTSUPP;
499 goto ns_err;
500 }
501
502 err = esw_create_offloads_fast_fdb_table(esw);
503 if (err)
504 goto fast_fdb_err;
505
506 table_size = nvports + MAX_PF_SQ + 1;
507
508 ft_attr.max_fte = table_size;
509 ft_attr.prio = FDB_SLOW_PATH;
510
511 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
512 if (IS_ERR(fdb)) {
513 err = PTR_ERR(fdb);
514 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
515 goto slow_fdb_err;
516 }
517 esw->fdb_table.offloads.fdb = fdb;
518
519 /* create send-to-vport group */
520 memset(flow_group_in, 0, inlen);
521 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
522 MLX5_MATCH_MISC_PARAMETERS);
523
524 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
525
526 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
527 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
528
529 ix = nvports + MAX_PF_SQ;
530 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
531 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
532
533 g = mlx5_create_flow_group(fdb, flow_group_in);
534 if (IS_ERR(g)) {
535 err = PTR_ERR(g);
536 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
537 goto send_vport_err;
538 }
539 esw->fdb_table.offloads.send_to_vport_grp = g;
540
541 /* create miss group */
542 memset(flow_group_in, 0, inlen);
543 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
544
545 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
546 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
547
548 g = mlx5_create_flow_group(fdb, flow_group_in);
549 if (IS_ERR(g)) {
550 err = PTR_ERR(g);
551 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
552 goto miss_err;
553 }
554 esw->fdb_table.offloads.miss_grp = g;
555
556 err = esw_add_fdb_miss_rule(esw);
557 if (err)
558 goto miss_rule_err;
559
560 kvfree(flow_group_in);
561 return 0;
562
563 miss_rule_err:
564 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
565 miss_err:
566 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
567 send_vport_err:
568 mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
569 slow_fdb_err:
570 mlx5_destroy_flow_table(esw->fdb_table.fdb);
571 fast_fdb_err:
572 ns_err:
573 kvfree(flow_group_in);
574 return err;
575 }
576
577 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
578 {
579 if (!esw->fdb_table.fdb)
580 return;
581
582 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
583 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
584 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
585 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
586
587 mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
588 esw_destroy_offloads_fast_fdb_table(esw);
589 }
590
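/* The offloads table lives in the offloads namespace and is sized for one
 * rule per vport (num_vfs + 2 entries); it holds the vport rx rules.
 */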
591 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
592 {
593 struct mlx5_flow_table_attr ft_attr = {};
594 struct mlx5_core_dev *dev = esw->dev;
595 struct mlx5_flow_table *ft_offloads;
596 struct mlx5_flow_namespace *ns;
597 int err = 0;
598
599 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
600 if (!ns) {
601 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
602 return -EOPNOTSUPP;
603 }
604
605 ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
606
607 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
608 if (IS_ERR(ft_offloads)) {
609 err = PTR_ERR(ft_offloads);
610 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
611 return err;
612 }
613
614 esw->offloads.ft_offloads = ft_offloads;
615 return 0;
616 }
617
618 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
619 {
620 struct mlx5_esw_offload *offloads = &esw->offloads;
621
622 mlx5_destroy_flow_table(offloads->ft_offloads);
623 }
624
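/* Single flow group spanning the whole offloads table, matching only on the
 * misc source_port field.
 */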
625 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
626 {
627 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
628 struct mlx5_flow_group *g;
629 struct mlx5_priv *priv = &esw->dev->priv;
630 u32 *flow_group_in;
631 void *match_criteria, *misc;
632 int err = 0;
633 int nvports = priv->sriov.num_vfs + 2;
634
635 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
636 if (!flow_group_in)
637 return -ENOMEM;
638
639 /* create vport rx group */
640 memset(flow_group_in, 0, inlen);
641 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
642 MLX5_MATCH_MISC_PARAMETERS);
643
644 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
645 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
646 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
647
648 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
649 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
650
651 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
652
653 if (IS_ERR(g)) {
654 err = PTR_ERR(g);
655 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
656 goto out;
657 }
658
659 esw->offloads.vport_rx_group = g;
660 out:
661 kvfree(flow_group_in);
662 return err;
663 }
664
665 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
666 {
667 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
668 }
669
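/* Add an offloads table rule that matches on the e-switch source vport and
 * forwards the traffic to the given TIR.
 */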
670 struct mlx5_flow_handle *
671 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
672 {
673 struct mlx5_flow_act flow_act = {0};
674 struct mlx5_flow_destination dest;
675 struct mlx5_flow_handle *flow_rule;
676 struct mlx5_flow_spec *spec;
677 void *misc;
678
679 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
680 if (!spec) {
681 flow_rule = ERR_PTR(-ENOMEM);
682 goto out;
683 }
684
685 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
686 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
687
688 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
689 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
690
691 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
692 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
693 dest.tir_num = tirn;
694
695 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
696 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
697 &flow_act, &dest, 1);
698 if (IS_ERR(flow_rule)) {
699 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
700 goto out;
701 }
702
703 out:
704 kvfree(spec);
705 return flow_rule;
706 }
707
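/* Move the e-switch from legacy to offloads mode by re-enabling SRIOV in
 * SRIOV_OFFLOADS; fall back to legacy if that fails. If no inline mode was
 * chosen yet, derive it from the vports.
 */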
708 static int esw_offloads_start(struct mlx5_eswitch *esw)
709 {
710 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
711
712 if (esw->mode != SRIOV_LEGACY) {
713 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
714 return -EINVAL;
715 }
716
717 mlx5_eswitch_disable_sriov(esw);
718 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
719 if (err) {
720 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
721 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
722 if (err1)
723 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
724 }
725 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
726 if (mlx5_eswitch_inline_mode_get(esw,
727 num_vfs,
728 &esw->offloads.inline_mode)) {
729 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
730 esw_warn(esw->dev, "Inline mode is different between vports\n");
731 }
732 }
733 return err;
734 }
735
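/* Bring up offloads mode: create the FDB tables, the offloads table and the
 * vport rx group, then load every registered representor. PF RoCE is removed
 * first so missed packets are not taken by RoCE steering.
 */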
736 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
737 {
738 struct mlx5_eswitch_rep *rep;
739 int vport;
740 int err;
741
742 /* disable PF RoCE so missed packets don't go through RoCE steering */
743 mlx5_dev_list_lock();
744 mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
745 mlx5_dev_list_unlock();
746
747 err = esw_create_offloads_fdb_tables(esw, nvports);
748 if (err)
749 goto create_fdb_err;
750
751 err = esw_create_offloads_table(esw);
752 if (err)
753 goto create_ft_err;
754
755 err = esw_create_vport_rx_group(esw);
756 if (err)
757 goto create_fg_err;
758
759 for (vport = 0; vport < nvports; vport++) {
760 rep = &esw->offloads.vport_reps[vport];
761 if (!rep->valid)
762 continue;
763
764 err = rep->load(esw, rep);
765 if (err)
766 goto err_reps;
767 }
768
769 return 0;
770
771 err_reps:
772 for (vport--; vport >= 0; vport--) {
773 rep = &esw->offloads.vport_reps[vport];
774 if (!rep->valid)
775 continue;
776 rep->unload(esw, rep);
777 }
778 esw_destroy_vport_rx_group(esw);
779
780 create_fg_err:
781 esw_destroy_offloads_table(esw);
782
783 create_ft_err:
784 esw_destroy_offloads_fdb_tables(esw);
785
786 create_fdb_err:
787 /* enable back PF RoCE */
788 mlx5_dev_list_lock();
789 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
790 mlx5_dev_list_unlock();
791
792 return err;
793 }
794
795 static int esw_offloads_stop(struct mlx5_eswitch *esw)
796 {
797 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
798
799 mlx5_eswitch_disable_sriov(esw);
800 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
801 if (err) {
802 esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
803 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
804 if (err1)
805 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
806 }
807
808 /* enable back PF RoCE */
809 mlx5_dev_list_lock();
810 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
811 mlx5_dev_list_unlock();
812
813 return err;
814 }
815
816 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
817 {
818 struct mlx5_eswitch_rep *rep;
819 int vport;
820
821 for (vport = nvports - 1; vport >= 0; vport--) {
822 rep = &esw->offloads.vport_reps[vport];
823 if (!rep->valid)
824 continue;
825 rep->unload(esw, rep);
826 }
827
828 esw_destroy_vport_rx_group(esw);
829 esw_destroy_offloads_table(esw);
830 esw_destroy_offloads_fdb_tables(esw);
831 }
832
833 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
834 {
835 switch (mode) {
836 case DEVLINK_ESWITCH_MODE_LEGACY:
837 *mlx5_mode = SRIOV_LEGACY;
838 break;
839 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
840 *mlx5_mode = SRIOV_OFFLOADS;
841 break;
842 default:
843 return -EINVAL;
844 }
845
846 return 0;
847 }
848
849 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
850 {
851 switch (mlx5_mode) {
852 case SRIOV_LEGACY:
853 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
854 break;
855 case SRIOV_OFFLOADS:
856 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
857 break;
858 default:
859 return -EINVAL;
860 }
861
862 return 0;
863 }
864
865 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
866 {
867 switch (mode) {
868 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
869 *mlx5_mode = MLX5_INLINE_MODE_NONE;
870 break;
871 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
872 *mlx5_mode = MLX5_INLINE_MODE_L2;
873 break;
874 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
875 *mlx5_mode = MLX5_INLINE_MODE_IP;
876 break;
877 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
878 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
879 break;
880 default:
881 return -EINVAL;
882 }
883
884 return 0;
885 }
886
887 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
888 {
889 switch (mlx5_mode) {
890 case MLX5_INLINE_MODE_NONE:
891 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
892 break;
893 case MLX5_INLINE_MODE_L2:
894 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
895 break;
896 case MLX5_INLINE_MODE_IP:
897 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
898 break;
899 case MLX5_INLINE_MODE_TCP_UDP:
900 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
901 break;
902 default:
903 return -EINVAL;
904 }
905
906 return 0;
907 }
908
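/* Checks shared by the devlink e-switch callbacks: ethernet port type,
 * e-switch manager capability, and SRIOV enabled.
 */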
909 static int mlx5_devlink_eswitch_check(struct devlink *devlink)
910 {
911 struct mlx5_core_dev *dev = devlink_priv(devlink);
912
913 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
914 return -EOPNOTSUPP;
915
916 if (!MLX5_ESWITCH_MANAGER(dev))
917 return -EPERM;
918
919 if (dev->priv.eswitch->mode == SRIOV_NONE)
920 return -EOPNOTSUPP;
921
922 return 0;
923 }
924
925 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
926 {
927 struct mlx5_core_dev *dev = devlink_priv(devlink);
928 u16 cur_mlx5_mode, mlx5_mode = 0;
929 int err;
930
931 err = mlx5_devlink_eswitch_check(devlink);
932 if (err)
933 return err;
934
935 cur_mlx5_mode = dev->priv.eswitch->mode;
936
937 if (esw_mode_from_devlink(mode, &mlx5_mode))
938 return -EINVAL;
939
940 if (cur_mlx5_mode == mlx5_mode)
941 return 0;
942
943 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
944 return esw_offloads_start(dev->priv.eswitch);
945 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
946 return esw_offloads_stop(dev->priv.eswitch);
947 else
948 return -EINVAL;
949 }
950
951 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
952 {
953 struct mlx5_core_dev *dev = devlink_priv(devlink);
954 int err;
955
956 err = mlx5_devlink_eswitch_check(devlink);
957 if (err)
958 return err;
959
960 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
961 }
962
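/* Changing the min inline mode is only possible when the device supports
 * per-vport context inline mode and no offloaded flows exist; the new mode is
 * applied to every enabled VF vport and rolled back on failure.
 */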
963 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
964 {
965 struct mlx5_core_dev *dev = devlink_priv(devlink);
966 struct mlx5_eswitch *esw = dev->priv.eswitch;
967 int err, vport;
968 u8 mlx5_mode;
969
970 err = mlx5_devlink_eswitch_check(devlink);
971 if (err)
972 return err;
973
974 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
975 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
976 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
977 return 0;
978 /* fall through */
979 case MLX5_CAP_INLINE_MODE_L2:
980 esw_warn(dev, "Inline mode can't be set\n");
981 return -EOPNOTSUPP;
982 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
983 break;
984 }
985
986 if (esw->offloads.num_flows > 0) {
987 esw_warn(dev, "Can't set inline mode when flows are configured\n");
988 return -EOPNOTSUPP;
989 }
990
991 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
992 if (err)
993 goto out;
994
995 for (vport = 1; vport < esw->enabled_vports; vport++) {
996 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
997 if (err) {
998 esw_warn(dev, "Failed to set min inline on vport %d\n",
999 vport);
1000 goto revert_inline_mode;
1001 }
1002 }
1003
1004 esw->offloads.inline_mode = mlx5_mode;
1005 return 0;
1006
1007 revert_inline_mode:
1008 while (--vport > 0)
1009 mlx5_modify_nic_vport_min_inline(dev,
1010 vport,
1011 esw->offloads.inline_mode);
1012 out:
1013 return err;
1014 }
1015
1016 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
1017 {
1018 struct mlx5_core_dev *dev = devlink_priv(devlink);
1019 struct mlx5_eswitch *esw = dev->priv.eswitch;
1020 int err;
1021
1022 err = mlx5_devlink_eswitch_check(devlink);
1023 if (err)
1024 return err;
1025
1026 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
1027 }
1028
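/* Report the effective min inline mode: fixed by the device capability, or,
 * with per-vport context inline mode, taken from the VFs, which must all
 * agree.
 */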
1029 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
1030 {
1031 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1032 struct mlx5_core_dev *dev = esw->dev;
1033 int vport;
1034
1035 if (!MLX5_CAP_GEN(dev, vport_group_manager))
1036 return -EOPNOTSUPP;
1037
1038 if (esw->mode == SRIOV_NONE)
1039 return -EOPNOTSUPP;
1040
1041 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1042 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1043 mlx5_mode = MLX5_INLINE_MODE_NONE;
1044 goto out;
1045 case MLX5_CAP_INLINE_MODE_L2:
1046 mlx5_mode = MLX5_INLINE_MODE_L2;
1047 goto out;
1048 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1049 goto query_vports;
1050 }
1051
1052 query_vports:
1053 for (vport = 1; vport <= nvfs; vport++) {
1054 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1055 if (vport > 1 && prev_mlx5_mode != mlx5_mode)
1056 return -EINVAL;
1057 prev_mlx5_mode = mlx5_mode;
1058 }
1059
1060 out:
1061 *mode = mlx5_mode;
1062 return 0;
1063 }
1064
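/* Changing encap support requires re-creating the fast path FDB table, so the
 * request is refused while offloaded flows exist. In legacy mode only the
 * setting is recorded, to take effect on the next switch to offloads.
 */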
1065 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
1066 {
1067 struct mlx5_core_dev *dev = devlink_priv(devlink);
1068 struct mlx5_eswitch *esw = dev->priv.eswitch;
1069 int err;
1070
1071 err = mlx5_devlink_eswitch_check(devlink);
1072 if (err)
1073 return err;
1074
1075 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
1076 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
1077 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
1078 return -EOPNOTSUPP;
1079
1080 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
1081 return -EOPNOTSUPP;
1082
1083 if (esw->mode == SRIOV_LEGACY) {
1084 esw->offloads.encap = encap;
1085 return 0;
1086 }
1087
1088 if (esw->offloads.encap == encap)
1089 return 0;
1090
1091 if (esw->offloads.num_flows > 0) {
1092 esw_warn(dev, "Can't set encapsulation when flows are configured\n");
1093 return -EOPNOTSUPP;
1094 }
1095
1096 esw_destroy_offloads_fast_fdb_table(esw);
1097
1098 esw->offloads.encap = encap;
1099 err = esw_create_offloads_fast_fdb_table(esw);
1100 if (err) {
1101 esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
1102 esw->offloads.encap = !encap;
1103 (void)esw_create_offloads_fast_fdb_table(esw);
1104 }
1105 return err;
1106 }
1107
1108 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
1109 {
1110 struct mlx5_core_dev *dev = devlink_priv(devlink);
1111 struct mlx5_eswitch *esw = dev->priv.eswitch;
1112 int err;
1113
1114 err = mlx5_devlink_eswitch_check(devlink);
1115 if (err)
1116 return err;
1117
1118 *encap = esw->offloads.encap;
1119 return 0;
1120 }
1121
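/* Register a representor for a vport: copy its load/unload callbacks and
 * identifiers into the rep slot and mark it valid.
 */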
1122 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
1123 int vport_index,
1124 struct mlx5_eswitch_rep *__rep)
1125 {
1126 struct mlx5_esw_offload *offloads = &esw->offloads;
1127 struct mlx5_eswitch_rep *rep;
1128
1129 rep = &offloads->vport_reps[vport_index];
1130
1131 memset(rep, 0, sizeof(*rep));
1132
1133 rep->load = __rep->load;
1134 rep->unload = __rep->unload;
1135 rep->vport = __rep->vport;
1136 rep->netdev = __rep->netdev;
1137 ether_addr_copy(rep->hw_id, __rep->hw_id);
1138
1139 INIT_LIST_HEAD(&rep->vport_sqs_list);
1140 rep->valid = true;
1141 }
1142
1143 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
1144 int vport_index)
1145 {
1146 struct mlx5_esw_offload *offloads = &esw->offloads;
1147 struct mlx5_eswitch_rep *rep;
1148
1149 rep = &offloads->vport_reps[vport_index];
1150
1151 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
1152 rep->unload(esw, rep);
1153
1154 rep->valid = false;
1155 }
1156
1157 struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
1158 {
1159 #define UPLINK_REP_INDEX 0
1160 struct mlx5_esw_offload *offloads = &esw->offloads;
1161 struct mlx5_eswitch_rep *rep;
1162
1163 rep = &offloads->vport_reps[UPLINK_REP_INDEX];
1164 return rep->netdev;
1165 }
1166