/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                break;
        }
}

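/* The representor reports the e-switch vport counters as its own link
 * statistics; software (per-ring) and hardware (per-vport) counters are
 * refreshed separately below.
 */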
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = &c->rq.stats;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes += rq_stats->bytes;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes += sq_stats->bytes;
                        s->tx_queue_dropped += sq_stats->dropped;
                }
        }
}

static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_rep_update_sw_counters(priv);
        mlx5e_rep_update_hw_counters(priv);
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_rep_update_sw_counters(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo       = mlx5e_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

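/* Point the e-switch at every send queue owned by this representor so
 * that packets transmitted on the representor netdev are forwarded to
 * the vport it represents.
 */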
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u16 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc,
                      sizeof(u16), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5_eswitch_sqs2vport_stop(esw, rep);
}

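/* Hardware flow counter sampling is driven by neighbour probing: start
 * from the smaller of the IPv4 and IPv6 DELAY_PROBE_TIME defaults so the
 * sampling interval never exceeds either table's probe delay.
 */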
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->rep->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->rep->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

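/* Re-sync offloaded encapsulation flows with the current neighbour
 * state: tear the rules down when the neighbour became invalid or
 * changed its MAC, and re-offload them once it is valid again.
 */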
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
            !ether_addr_equal(e->h_dest, ha))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

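/* Work handler queued from the netevent notifier; it runs under RTNL so
 * the neighbour snapshot and the encap list walk below stay consistent.
 */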
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh validity
         * and its hw address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

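/* Netevent notifier; called in atomic (often softirq) context, so the
 * real work is deferred to the representor workqueue and only lookup
 * and refcounting happen here, under encap_lock.
 */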
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->rep->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take RTNL mutex, so use
                 * spin_lock_bh to lookup the neigh table. bh is used since
                 * netevent can be called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neigh reference
                 * is taken
                 */
                nhe->n = n;

                /* Take a reference to ensure the neighbour and mlx5 encap
                 * entry won't be destructed until we drop the reference in
                 * delayed work.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check the device is present since we don't care about
                 * changes in the default table, we only care about changes
                 * done per-device delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* We are in atomic context and can't take RTNL mutex,
                 * so use spin_lock_bh to walk the neigh list and look for
                 * the relevant device. bh is used since netevent can be
                 * called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

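/* Hash table insert/remove for neighbour entries. Removal also takes
 * encap_lock so the atomic netevent handler never observes a
 * half-removed entry.
 */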
static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The neigh hash entry must be removed from the hash table regardless
         * of the reference count value, so it won't be found by the next
         * neigh notification call. The neigh hash entry reference count is
         * incremented only during creation and neigh notification calls and
         * protects from freeing the nhe struct.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

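/* Attach an encap entry to the neighbour entry matching its m_neigh
 * key, creating the neighbour entry on first use; the last detach
 * destroys the neighbour entry again.
 */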
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err)
                        return err;
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

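/* ndo_open/ndo_stop: besides opening and closing the channels, toggle
 * the e-switch vport admin state so the represented function's link
 * follows the representor's administrative state.
 */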
static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                           rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                      rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

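/* VF vports are numbered from 1 (see mlx5e_rep_register_vf_vports()
 * below), so vport - 1 is the VF index reported as the phys port name.
 */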
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
                                        char *buf, size_t len)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

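/* TC flower offload entry point. Rules marked for the corresponding
 * uplink (egress_dev) are redirected to the uplink netdev's own
 * ndo_setup_tc so they land in the right e-switch tables.
 */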
static int
mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
                              struct tc_cls_flower_offload *cls_flower)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
            cls_flower->common.chain_index)
                return -EOPNOTSUPP;

        if (cls_flower->egress_dev) {
                struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

                dev = mlx5_eswitch_get_uplink_netdev(esw);
                return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
                                                     cls_flower);
        }

        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv, cls_flower);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv, cls_flower);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv, cls_flower);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        rep = rpriv->rep;
        if (esw->mode == SRIOV_OFFLOADS &&
            rep && rep->vport == FDB_UPLINK_VPORT)
                return true;

        return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;

        return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
                        return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes = sstats->tx_bytes;

        stats->tx_dropped = sstats->tx_queue_dropped;

        return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get = mlx5e_attr_get,
};

static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open                = mlx5e_rep_open,
        .ndo_stop                = mlx5e_rep_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc            = mlx5e_rep_setup_tc,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
        .ndo_has_offload_stats   = mlx5e_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_get_offload_stats,
};

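/* Representors get small fixed-size queues (2^0x6 entries, bounded
 * below by the driver minimum) and a single traffic class, since they
 * carry only slow-path traffic.
 */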
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
{
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
        params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
        params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE;

        params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
        params->num_tc = 1;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        netdev->netdev_ops = &mlx5e_netdev_ops_rep;

        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

        netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

        netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features |= NETIF_F_HW_TC;

        eth_hw_addr_random(netdev);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
                           struct net_device *netdev,
                           const struct mlx5e_profile *profile,
                           void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        priv->mdev = mdev;
        priv->netdev = netdev;
        priv->profile = profile;
        priv->ppriv = ppriv;

        mutex_init(&priv->state_lock);

        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

        priv->channels.params.num_channels = profile->max_nch(mdev);

        priv->hard_mtu = MLX5E_ETH_HARD_MTU;

        mlx5e_build_rep_params(mdev, &priv->channels.params);
        mlx5e_build_rep_netdev(netdev);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                return err;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_direct_rqts;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      priv->direct_tir[0].tirn);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto err_destroy_direct_tirs;
        }
        rep->vport_rx_rule = flow_rule;

        err = mlx5e_tc_init(priv);
        if (err)
                goto err_del_flow_rule;

        return 0;

err_del_flow_rule:
        mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_tc_cleanup(priv);
        mlx5_del_flow_rules(rep->vport_rx_rule);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
        return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .init_rx                = mlx5e_init_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_nic_tx,
        .update_stats           = mlx5e_rep_update_stats,
        .max_nch                = mlx5e_get_rep_max_num_channels,
        .update_carrier         = NULL,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
        .max_tc                 = 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_priv *priv = netdev_priv(rep->netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                err = mlx5e_add_sqs_fwd_rules(priv);
                if (err)
                        return err;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err)
                goto err_remove_sqs;

        return 0;

err_remove_sqs:
        mlx5e_remove_sqs_fwd_rules(priv);
        return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_priv *priv = netdev_priv(rep->netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_remove_sqs_fwd_rules(priv);

        /* clean (and re-init) existing uplink offloaded TC rules */
        mlx5e_tc_cleanup(priv);
        mlx5e_tc_init(priv);

        mlx5e_rep_neigh_cleanup(rpriv);
}

static int
mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        int err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rep->netdev = netdev;
        rpriv->rep = rep;

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_netdev;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbours handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_neigh_cleanup;
        }

        return 0;

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
        struct net_device *netdev = rep->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        void *ppriv = priv->ppriv;

        unregister_netdev(rep->netdev);

        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

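/* Register one representor per possible VF vport (vports 1..n); the
 * uplink representor is registered separately at index 0 by
 * mlx5e_register_vport_reps() below.
 */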
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;
        u8 mac[ETH_ALEN];

        mlx5_query_nic_vport_mac_address(mdev, 0, mac);

        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep rep;

                rep.load = mlx5e_vport_rep_load;
                rep.unload = mlx5e_vport_rep_unload;
                rep.vport = vport;
                ether_addr_copy(rep.hw_id, mac);
                mlx5_eswitch_register_vport_rep(esw, vport, &rep);
        }
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++)
                mlx5_eswitch_unregister_vport_rep(esw, vport);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5_eswitch_rep rep;

        mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
        rep.load = mlx5e_nic_rep_load;
        rep.unload = mlx5e_nic_rep_unload;
        rep.vport = FDB_UPLINK_VPORT;
        rep.netdev = priv->netdev;
        mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport */

        mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
        mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF */
}

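/* The PF/uplink representor reuses the NIC netdev, so its rep private
 * part is allocated here and tied to the e-switch's vport 0 rep.
 */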
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return NULL;

        rpriv->rep = &esw->offloads.vport_reps[0];
        return rpriv;
}