/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
#include <net/xdp_sock.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
#include "en/xsk/umem.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"

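/* Striding RQ (MPWQE) receives multiple packets into one large,
 * multi-stride WQE whose pages are (re)mapped by inline UMR WQEs posted
 * on the ICOSQ. It therefore requires both the striding_rq capability
 * and inline UMR WQEs that fit within the device's maximum SQ WQE size;
 * this helper checks exactly that.
 */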
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (MLX5_IPSEC_DEV(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

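/* Refresh every stats group that provides an update_stats() callback;
 * groups are walked in reverse registration order.
 */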
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats)
			mlx5e_stats_grps[i].update_stats(priv);
}

void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}

static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
	struct mlx5_eqe   *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	priv->events_nb.notifier_call = async_event;
	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}

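/* Fill the static parts of the UMR WQE used to map an MPWQE's pages:
 * the control segment carries the ICOSQ number and the RQ's mkey, while
 * the UMR control segment requests an inline translation update big
 * enough to cover MLX5_MPWRQ_PAGES_PER_WQE pages.
 */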
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

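/* Create the UMR memory key backing striding RQ buffers: an MTT-based
 * mkey created in the "free" state with UMR enabled, whose translation
 * entries are later repopulated by UMR WQEs for each MPWQE.
 */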
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

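/* Partition WQE fragments across pages. Fragments are laid out
 * back-to-back within a page; a fragment that would cross a page
 * boundary starts on a fresh page, and the last fragment placed in each
 * page is flagged so the page can be released once it completes.
 */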
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag = {};
	struct mlx5e_wqe_frag_info *prev = NULL;
	int i;

	next_frag.di = &rq->wqe.di[0];

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}

static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}

static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);

	mlx5e_reporter_rq_cqe_err(rq);
}

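/* Allocate all software state of an RQ: the striding or cyclic work
 * queue, the XDP rxq info, and either an XSK Reuse Ring or a page_pool
 * for buffer recycling. The corresponding hardware RQ object is created
 * separately by mlx5e_create_rq().
 */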
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct xdp_umem *umem,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 num_xsk_frames = 0;
	u32 rq_xdp_ix;
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq   = &c->rq_xdpsq;
	rq->umem    = umem;

	if (rq->umem)
		rq->stats = &c->priv->channel_stats[c->ix].xskrq;
	else
		rq->stats = &c->priv->channel_stats[c->ix].rq;
	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	rq_xdp_ix = rq->ix;
	if (xsk)
		rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
	rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			return err;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		if (xsk)
			num_xsk_frames = wq_sz <<
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
			mlx5e_mpwqe_get_log_rq_size(params, xsk);

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;

		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		rq->mpwqe.num_strides =
			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_free;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			return err;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		if (xsk)
			num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;

		rq->wqe.info = rqp->frags_info;
		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
					(wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_free;
		}

		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
		if (err)
			goto err_free;

		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_free;
		}

		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->mkey_be = c->mkey_be;
	}

	if (xsk) {
		err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
		if (unlikely(err)) {
			mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
				      num_xsk_frames);
			goto err_free;
		}

		rq->zca.free = mlx5e_xsk_zca_free;
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_ZERO_COPY,
						 &rq->zca);
	} else {
		/* Create a page_pool and register it with rxq */
		pp_params.order     = 0;
		pp_params.flags     = 0; /* No internal DMA mapping in page_pool */
		pp_params.pool_size = pool_size;
		pp_params.nid       = cpu_to_node(c->cpu);
		pp_params.dev       = c->pdev;
		pp_params.dma_dir   = rq->buff.map_dir;

		/* page_pool can be used even when there is no rq->xdp_prog;
		 * given that page_pool does not handle DMA mapping, there is
		 * no required state to clear. And page_pool gracefully handles
		 * elevated refcnt.
		 */
		rq->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->page_pool)) {
			err = PTR_ERR(rq->page_pool);
			rq->page_pool = NULL;
			goto err_free;
		}
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_POOL, rq->page_pool);
	}
	if (err)
		goto err_free;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check whether num_frags is not a power of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		/* With AF_XDP, page_cache is not used, so this loop is not
		 * entered, and it's safe to call mlx5e_page_release_dynamic
		 * directly.
		 */
		mlx5e_page_release_dynamic(rq, dma_info, false);
	}

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	struct mlx5e_channel *c = rq->channel;

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	mlx5e_reporter_rx_timeout(rq);
	return -ETIMEDOUT;
}

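/* Release every RX WQE that is still posted. For striding RQ this also
 * covers UMR WQEs still in progress, which occupy entries starting at
 * wq->head, before the linked list itself is drained.
 */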
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
		u16 head = wq->head;
		int i;

		/* Outstanding UMR WQEs (in progress) start at wq->head */
		for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
			rq->dealloc_wqe(rq, head);
			head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
		}

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix    = be16_to_cpu(wqe_ix_be);
			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}

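/* Bring an RQ fully up: allocate software state, create the hardware RQ
 * object and move it from RST to RDY, then apply the feature bits (full
 * CQE checksum, DIM, and no-csum-complete when XDP is active).
 */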
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_trigger_irq(&rq->channel->icosq);
}

void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	cancel_work_sync(&rq->channel->icosq.recover_work);
	cancel_work_sync(&rq->recover_work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi_fifo.xi);
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	int wq_sz        = mlx5_wq_cyc_get_size(&sq->wq);
	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
				      GFP_KERNEL, numa);
	if (!xdpi_fifo->xi)
		return -ENOMEM;

	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
	xdpi_fifo->mask = dsegs_per_wq - 1;

	return 0;
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;

	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
					GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
	if (err) {
		mlx5e_free_xdpsq_db(sq);
		return err;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xdp_umem *umem,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->umem      = umem;

	sq->stats = sq->umem ?
		&c->priv->channel_stats[c->ix].xsksq :
		is_redirect ?
			&c->priv->channel_stats[c->ix].xdpsq :
			&c->priv->channel_stats[c->ix].rq_xdpsq;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
						  sizeof(*sq->db.ico_wqe)),
				       GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
					      recover_work);

	mlx5e_reporter_icosq_cqe_err(sq);
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->ch_ix     = c->ix;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	sq->stop_room = MLX5E_SQ_STOP_ROOM;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
#ifdef CONFIG_MLX5_EN_TLS
	if (mlx5_accel_is_tls_device(c->priv->mdev)) {
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
		sq->stop_room += MLX5E_SQ_TLS_ROOM +
			mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
						    TLS_MAX_PAYLOAD_SIZE);
	}
#endif

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db    = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;
	u32                         cqn;
	u32                         tisn;
	u8                          tis_lst_sz;
	u8                          min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(sq);

	return err;
}

void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

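/* Quiesce a TXQ SQ: clear the enabled bit, wait out any in-flight NAPI
 * poll, stop the netdev TX queue, and ring one final NOP doorbell so
 * that previously posted WQEs are flushed to hardware.
 */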
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	mlx5e_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe_info *wi;
		struct mlx5e_tx_wqe *nop;

		wi = &sq->db.wqe_info[pi];

		memset(wi, 0, sizeof(*wi));
		wi->num_wqebbs = 1;
		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_rate_limit rl = {0};

	cancel_work_sync(&sq->dim.work);
	cancel_work_sync(&sq->recover_work);
	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
					      recover_work);

	mlx5e_reporter_tx_err_cqe(sq);
}

int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	mlx5e_free_icosq(sq);

	return err;
}

void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
	set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
}

void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
	struct mlx5e_channel *c = icosq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
	napi_synchronize(&c->napi);
}

void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

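/* Open an XDP SQ. When not using MPWQE mode, the fixed fields of every
 * WQE (DS count, inline header size, lkey) are pre-initialized once
 * here so the datapath only fills in the per-packet fields.
 */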
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	mlx5e_set_xmit_fp(sq, param->is_mpw);

	if (!param->is_mpw) {
		unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
		unsigned int inline_hdr_sz = 0;
		int i;

		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
			ds_cnt++;
		}

		/* Pre-initialize fixed WQE fields */
		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
			struct mlx5e_xdp_wqe_info *wi  = &sq->db.wqe_info[i];
			struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
			struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
			struct mlx5_wqe_data_seg *dseg;

			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

			dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
			dseg->lkey = sq->mkey_be;

			wi->num_wqebbs = 1;
			wi->num_pkts   = 1;
		}
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

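/* Allocate the CQ work queue and doorbell records shared by all CQ
 * users. Every CQE is initialized with an invalid op_own value (0xf1)
 * so that stale entries are never mistaken for valid completions.
 */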
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix   = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err, tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc], tc);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

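/* Apply a per-SQ TX rate limit. Rates are reference-counted in a shared
 * device table: the old rate is released first, the new one is added to
 * obtain a rate-limit index, and MODIFY_SQ then points the SQ at that
 * index.
 */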
mlx5e_set_sq_maxrate(struct net_device * dev,struct mlx5e_txqsq * sq,u32 rate)1724 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1725 				struct mlx5e_txqsq *sq, u32 rate)
1726 {
1727 	struct mlx5e_priv *priv = netdev_priv(dev);
1728 	struct mlx5_core_dev *mdev = priv->mdev;
1729 	struct mlx5e_modify_sq_param msp = {0};
1730 	struct mlx5_rate_limit rl = {0};
1731 	u16 rl_index = 0;
1732 	int err;
1733 
1734 	if (rate == sq->rate_limit)
1735 		/* nothing to do */
1736 		return 0;
1737 
1738 	if (sq->rate_limit) {
1739 		rl.rate = sq->rate_limit;
1740 		/* remove the current rl index to free space for the next ones */
1741 		mlx5_rl_remove_rate(mdev, &rl);
1742 	}
1743 
1744 	sq->rate_limit = 0;
1745 
1746 	if (rate) {
1747 		rl.rate = rate;
1748 		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1749 		if (err) {
1750 			netdev_err(dev, "Failed configuring rate %u: %d\n",
1751 				   rate, err);
1752 			return err;
1753 		}
1754 	}
1755 
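	/* An RDY -> RDY transition with rl_update set swaps in the new rate
	 * limit index without taking the SQ out of service.
	 */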
1756 	msp.curr_state = MLX5_SQC_STATE_RDY;
1757 	msp.next_state = MLX5_SQC_STATE_RDY;
1758 	msp.rl_index   = rl_index;
1759 	msp.rl_update  = true;
1760 	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1761 	if (err) {
1762 		netdev_err(dev, "Failed configuring rate %u: %d\n",
1763 			   rate, err);
1764 		/* remove the rate from the table */
1765 		if (rate)
1766 			mlx5_rl_remove_rate(mdev, &rl);
1767 		return err;
1768 	}
1769 
1770 	sq->rate_limit = rate;
1771 	return 0;
1772 }
1773 
1774 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1775 {
1776 	struct mlx5e_priv *priv = netdev_priv(dev);
1777 	struct mlx5_core_dev *mdev = priv->mdev;
1778 	struct mlx5e_txqsq *sq = priv->txq2sq[index];
1779 	int err = 0;
1780 
1781 	if (!mlx5_rl_is_supported(mdev)) {
1782 		netdev_err(dev, "Rate limiting is not supported on this device\n");
1783 		return -EINVAL;
1784 	}
1785 
1786 	/* rate is given in Mb/sec, HW config is in Kb/sec */
1787 	rate = rate << 10;
1788 
1789 	/* Check whether the rate is in the valid range; 0 is always valid */
1790 	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1791 		netdev_err(dev, "TX rate %u is not in range\n", rate);
1792 		return -ERANGE;
1793 	}
1794 
1795 	mutex_lock(&priv->state_lock);
1796 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1797 		err = mlx5e_set_sq_maxrate(dev, sq, rate);
1798 	if (!err)
1799 		priv->tx_rates[index] = rate;
1800 	mutex_unlock(&priv->state_lock);
1801 
1802 	return err;
1803 }
1804 
1805 static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
1806 				   struct mlx5e_params *params)
1807 {
1808 	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
1809 	int irq;
1810 
1811 	if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
1812 		return -ENOMEM;
1813 
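	/* Channels are striped across completion vectors (ix, ix + nch, ...);
	 * collect the first CPU of each matching IRQ affinity mask so that
	 * XPS steers transmits from those CPUs to this channel.
	 */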
1814 	for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
1815 		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
1816 
1817 		cpumask_set_cpu(cpu, c->xps_cpumask);
1818 	}
1819 
1820 	return 0;
1821 }
1822 
1823 static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
1824 {
1825 	free_cpumask_var(c->xps_cpumask);
1826 }
1827 
1828 static int mlx5e_open_queues(struct mlx5e_channel *c,
1829 			     struct mlx5e_params *params,
1830 			     struct mlx5e_channel_param *cparam)
1831 {
1832 	struct dim_cq_moder icocq_moder = {0, 0};
1833 	int err;
1834 
1835 	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1836 	if (err)
1837 		return err;
1838 
1839 	err = mlx5e_open_tx_cqs(c, params, cparam);
1840 	if (err)
1841 		goto err_close_icosq_cq;
1842 
1843 	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
1844 	if (err)
1845 		goto err_close_tx_cqs;
1846 
1847 	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1848 	if (err)
1849 		goto err_close_xdp_tx_cqs;
1850 
1851 	/* XDP SQ CQ params are the same as the normal TXQ SQ CQ params */
1852 	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1853 				     &cparam->tx_cq, &c->rq_xdpsq.cq) : 0;
1854 	if (err)
1855 		goto err_close_rx_cq;
1856 
1857 	napi_enable(&c->napi);
1858 
1859 	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1860 	if (err)
1861 		goto err_disable_napi;
1862 
1863 	err = mlx5e_open_sqs(c, params, cparam);
1864 	if (err)
1865 		goto err_close_icosq;
1866 
1867 	if (c->xdp) {
1868 		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
1869 				       &c->rq_xdpsq, false);
1870 		if (err)
1871 			goto err_close_sqs;
1872 	}
1873 
1874 	err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
1875 	if (err)
1876 		goto err_close_xdp_sq;
1877 
1878 	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
1879 	if (err)
1880 		goto err_close_rq;
1881 
1882 	return 0;
1883 
1884 err_close_rq:
1885 	mlx5e_close_rq(&c->rq);
1886 
1887 err_close_xdp_sq:
1888 	if (c->xdp)
1889 		mlx5e_close_xdpsq(&c->rq_xdpsq);
1890 
1891 err_close_sqs:
1892 	mlx5e_close_sqs(c);
1893 
1894 err_close_icosq:
1895 	mlx5e_close_icosq(&c->icosq);
1896 
1897 err_disable_napi:
1898 	napi_disable(&c->napi);
1899 
1900 	if (c->xdp)
1901 		mlx5e_close_cq(&c->rq_xdpsq.cq);
1902 
1903 err_close_rx_cq:
1904 	mlx5e_close_cq(&c->rq.cq);
1905 
1906 err_close_xdp_tx_cqs:
1907 	mlx5e_close_cq(&c->xdpsq.cq);
1908 
1909 err_close_tx_cqs:
1910 	mlx5e_close_tx_cqs(c);
1911 
1912 err_close_icosq_cq:
1913 	mlx5e_close_cq(&c->icosq.cq);
1914 
1915 	return err;
1916 }
1917 
1918 static void mlx5e_close_queues(struct mlx5e_channel *c)
1919 {
1920 	mlx5e_close_xdpsq(&c->xdpsq);
1921 	mlx5e_close_rq(&c->rq);
1922 	if (c->xdp)
1923 		mlx5e_close_xdpsq(&c->rq_xdpsq);
1924 	mlx5e_close_sqs(c);
1925 	mlx5e_close_icosq(&c->icosq);
1926 	napi_disable(&c->napi);
1927 	if (c->xdp)
1928 		mlx5e_close_cq(&c->rq_xdpsq.cq);
1929 	mlx5e_close_cq(&c->rq.cq);
1930 	mlx5e_close_cq(&c->xdpsq.cq);
1931 	mlx5e_close_tx_cqs(c);
1932 	mlx5e_close_cq(&c->icosq.cq);
1933 }
1934 
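/* Spread channels across LAG ports: a PF starts at port 0, while other
 * functions bias the starting port by their vhca_id, which keeps different
 * functions from all starting on the same port.
 */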
1935 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
1936 {
1937 	u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
1938 
1939 	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
1940 }
1941 
1942 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1943 			      struct mlx5e_params *params,
1944 			      struct mlx5e_channel_param *cparam,
1945 			      struct xdp_umem *umem,
1946 			      struct mlx5e_channel **cp)
1947 {
1948 	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
1949 	struct net_device *netdev = priv->netdev;
1950 	struct mlx5e_xsk_param xsk;
1951 	struct mlx5e_channel *c;
1952 	unsigned int irq;
1953 	int err;
1954 	int eqn;
1955 
1956 	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1957 	if (err)
1958 		return err;
1959 
1960 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1961 	if (!c)
1962 		return -ENOMEM;
1963 
1964 	c->priv     = priv;
1965 	c->mdev     = priv->mdev;
1966 	c->tstamp   = &priv->tstamp;
1967 	c->ix       = ix;
1968 	c->cpu      = cpu;
1969 	c->pdev     = priv->mdev->device;
1970 	c->netdev   = priv->netdev;
1971 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1972 	c->num_tc   = params->num_tc;
1973 	c->xdp      = !!params->xdp_prog;
1974 	c->stats    = &priv->channel_stats[ix].ch;
1975 	c->irq_desc = irq_to_desc(irq);
1976 	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
1977 
1978 	err = mlx5e_alloc_xps_cpumask(c, params);
1979 	if (err)
1980 		goto err_free_channel;
1981 
1982 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1983 
1984 	err = mlx5e_open_queues(c, params, cparam);
1985 	if (unlikely(err))
1986 		goto err_napi_del;
1987 
1988 	if (umem) {
1989 		mlx5e_build_xsk_param(umem, &xsk);
1990 		err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
1991 		if (unlikely(err))
1992 			goto err_close_queues;
1993 	}
1994 
1995 	*cp = c;
1996 
1997 	return 0;
1998 
1999 err_close_queues:
2000 	mlx5e_close_queues(c);
2001 
2002 err_napi_del:
2003 	netif_napi_del(&c->napi);
2004 	mlx5e_free_xps_cpumask(c);
2005 
2006 err_free_channel:
2007 	kvfree(c);
2008 
2009 	return err;
2010 }
2011 
2012 static void mlx5e_activate_channel(struct mlx5e_channel *c)
2013 {
2014 	int tc;
2015 
2016 	for (tc = 0; tc < c->num_tc; tc++)
2017 		mlx5e_activate_txqsq(&c->sq[tc]);
2018 	mlx5e_activate_icosq(&c->icosq);
2019 	mlx5e_activate_rq(&c->rq);
2020 	netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
2021 
2022 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2023 		mlx5e_activate_xsk(c);
2024 }
2025 
2026 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2027 {
2028 	int tc;
2029 
2030 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2031 		mlx5e_deactivate_xsk(c);
2032 
2033 	mlx5e_deactivate_rq(&c->rq);
2034 	mlx5e_deactivate_icosq(&c->icosq);
2035 	for (tc = 0; tc < c->num_tc; tc++)
2036 		mlx5e_deactivate_txqsq(&c->sq[tc]);
2037 }
2038 
2039 static void mlx5e_close_channel(struct mlx5e_channel *c)
2040 {
2041 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2042 		mlx5e_close_xsk(c);
2043 	mlx5e_close_queues(c);
2044 	netif_napi_del(&c->napi);
2045 	mlx5e_free_xps_cpumask(c);
2046 
2047 	kvfree(c);
2048 }
2049 
2050 #define DEFAULT_FRAG_SIZE (2048)
2051 
2052 static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
2053 				      struct mlx5e_params *params,
2054 				      struct mlx5e_xsk_param *xsk,
2055 				      struct mlx5e_rq_frags_info *info)
2056 {
2057 	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
2058 	int frag_size_max = DEFAULT_FRAG_SIZE;
2059 	u32 buf_size = 0;
2060 	int i;
2061 
2062 #ifdef CONFIG_MLX5_EN_IPSEC
2063 	if (MLX5_IPSEC_DEV(mdev))
2064 		byte_count += MLX5E_METADATA_ETHER_LEN;
2065 #endif
2066 
2067 	if (mlx5e_rx_is_linear_skb(params, xsk)) {
2068 		int frag_stride;
2069 
2070 		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
2071 		frag_stride = roundup_pow_of_two(frag_stride);
2072 
2073 		info->arr[0].frag_size = byte_count;
2074 		info->arr[0].frag_stride = frag_stride;
2075 		info->num_frags = 1;
2076 		info->wqe_bulk = PAGE_SIZE / frag_stride;
2077 		goto out;
2078 	}
2079 
2080 	if (byte_count > PAGE_SIZE +
2081 	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
2082 		frag_size_max = PAGE_SIZE;
2083 
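	/* Split byte_count across up to MLX5E_MAX_RX_FRAGS fragments: all but
	 * the last fragment are capped at frag_size_max, and the last one
	 * takes the remainder.
	 */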
2084 	i = 0;
2085 	while (buf_size < byte_count) {
2086 		int frag_size = byte_count - buf_size;
2087 
2088 		if (i < MLX5E_MAX_RX_FRAGS - 1)
2089 			frag_size = min(frag_size, frag_size_max);
2090 
2091 		info->arr[i].frag_size = frag_size;
2092 		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
2093 
2094 		buf_size += frag_size;
2095 		i++;
2096 	}
2097 	info->num_frags = i;
2098 	/* number of different wqes sharing a page */
2099 	info->wqe_bulk = 1 + (info->num_frags % 2);
2100 
2101 out:
2102 	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
2103 	info->log_num_frags = order_base_2(info->num_frags);
2104 }
2105 
2106 static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
2107 {
2108 	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
2109 
2110 	switch (wq_type) {
2111 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2112 		sz += sizeof(struct mlx5e_rx_wqe_ll);
2113 		break;
2114 	default: /* MLX5_WQ_TYPE_CYCLIC */
2115 		sz += sizeof(struct mlx5e_rx_wqe_cyc);
2116 	}
2117 
2118 	return order_base_2(sz);
2119 }
2120 
2121 static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
2122 {
2123 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2124 
2125 	return MLX5_GET(wq, wq, log_wq_sz);
2126 }
2127 
2128 void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2129 			  struct mlx5e_params *params,
2130 			  struct mlx5e_xsk_param *xsk,
2131 			  struct mlx5e_rq_param *param)
2132 {
2133 	struct mlx5_core_dev *mdev = priv->mdev;
2134 	void *rqc = param->rqc;
2135 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2136 	int ndsegs = 1;
2137 
2138 	switch (params->rq_wq_type) {
2139 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2140 		MLX5_SET(wq, wq, log_wqe_num_of_strides,
2141 			 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
2142 			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
2143 		MLX5_SET(wq, wq, log_wqe_stride_size,
2144 			 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
2145 			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
2146 		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
2147 		break;
2148 	default: /* MLX5_WQ_TYPE_CYCLIC */
2149 		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
2150 		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
2151 		ndsegs = param->frags_info.num_frags;
2152 	}
2153 
2154 	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
2155 	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2156 	MLX5_SET(wq, wq, log_wq_stride,
2157 		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
2158 	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
2159 	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
2160 	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
2161 	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
2162 
2163 	param->wq.buf_numa_node = dev_to_node(mdev->device);
2164 }
2165 
2166 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
2167 				      struct mlx5e_rq_param *param)
2168 {
2169 	struct mlx5_core_dev *mdev = priv->mdev;
2170 	void *rqc = param->rqc;
2171 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2172 
2173 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
2174 	MLX5_SET(wq, wq, log_wq_stride,
2175 		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
2176 	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
2177 
2178 	param->wq.buf_numa_node = dev_to_node(mdev->device);
2179 }
2180 
2181 void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2182 				 struct mlx5e_sq_param *param)
2183 {
2184 	void *sqc = param->sqc;
2185 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2186 
2187 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2188 	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
2189 
2190 	param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
2191 }
2192 
2193 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
2194 				 struct mlx5e_params *params,
2195 				 struct mlx5e_sq_param *param)
2196 {
2197 	void *sqc = param->sqc;
2198 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2199 	bool allow_swp;
2200 
2201 	allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
2202 		    !!MLX5_IPSEC_DEV(priv->mdev);
2203 	mlx5e_build_sq_param_common(priv, param);
2204 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2205 	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
2206 }
2207 
2208 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2209 					struct mlx5e_cq_param *param)
2210 {
2211 	void *cqc = param->cqc;
2212 
2213 	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
2214 	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
2215 		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
2216 }
2217 
2218 void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2219 			     struct mlx5e_params *params,
2220 			     struct mlx5e_xsk_param *xsk,
2221 			     struct mlx5e_cq_param *param)
2222 {
2223 	struct mlx5_core_dev *mdev = priv->mdev;
2224 	void *cqc = param->cqc;
2225 	u8 log_cq_size;
2226 
2227 	switch (params->rq_wq_type) {
2228 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2229 		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
2230 			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
2231 		break;
2232 	default: /* MLX5_WQ_TYPE_CYCLIC */
2233 		log_cq_size = params->log_rq_mtu_frames;
2234 	}
2235 
2236 	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2237 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2238 		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
2239 		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2240 	}
2241 
2242 	mlx5e_build_common_cq_param(priv, param);
2243 	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2244 }
2245 
2246 void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2247 			     struct mlx5e_params *params,
2248 			     struct mlx5e_cq_param *param)
2249 {
2250 	void *cqc = param->cqc;
2251 
2252 	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2253 
2254 	mlx5e_build_common_cq_param(priv, param);
2255 	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2256 }
2257 
2258 void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2259 			      u8 log_wq_size,
2260 			      struct mlx5e_cq_param *param)
2261 {
2262 	void *cqc = param->cqc;
2263 
2264 	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2265 
2266 	mlx5e_build_common_cq_param(priv, param);
2267 
2268 	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2269 }
2270 
2271 void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2272 			     u8 log_wq_size,
2273 			     struct mlx5e_sq_param *param)
2274 {
2275 	void *sqc = param->sqc;
2276 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2277 
2278 	mlx5e_build_sq_param_common(priv, param);
2279 
2280 	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2281 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2282 }
2283 
2284 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2285 			     struct mlx5e_params *params,
2286 			     struct mlx5e_sq_param *param)
2287 {
2288 	void *sqc = param->sqc;
2289 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2290 
2291 	mlx5e_build_sq_param_common(priv, param);
2292 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2293 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
2294 }
2295 
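/* The ICOSQ must be able to post one UMR WQE per striding-RQ WQE, so its log
 * size is log(WQEBBs per UMR WQE) + log(number of RQ WQEs).
 */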
2296 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
2297 				      struct mlx5e_rq_param *rqp)
2298 {
2299 	switch (params->rq_wq_type) {
2300 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2301 		return order_base_2(MLX5E_UMR_WQEBBS) +
2302 			mlx5e_get_rq_log_wq_sz(rqp->rqc);
2303 	default: /* MLX5_WQ_TYPE_CYCLIC */
2304 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2305 	}
2306 }
2307 
2308 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2309 				      struct mlx5e_params *params,
2310 				      struct mlx5e_channel_param *cparam)
2311 {
2312 	u8 icosq_log_wq_sz;
2313 
2314 	mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
2315 
2316 	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
2317 
2318 	mlx5e_build_sq_param(priv, params, &cparam->sq);
2319 	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2320 	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2321 	mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq);
2322 	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2323 	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2324 }
2325 
2326 int mlx5e_open_channels(struct mlx5e_priv *priv,
2327 			struct mlx5e_channels *chs)
2328 {
2329 	struct mlx5e_channel_param *cparam;
2330 	int err = -ENOMEM;
2331 	int i;
2332 
2333 	chs->num = chs->params.num_channels;
2334 
2335 	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2336 	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2337 	if (!chs->c || !cparam)
2338 		goto err_free;
2339 
2340 	mlx5e_build_channel_param(priv, &chs->params, cparam);
2341 	for (i = 0; i < chs->num; i++) {
2342 		struct xdp_umem *umem = NULL;
2343 
2344 		if (chs->params.xdp_prog)
2345 			umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
2346 
2347 		err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
2348 		if (err)
2349 			goto err_close_channels;
2350 	}
2351 
2352 	mlx5e_health_channels_update(priv);
2353 	kvfree(cparam);
2354 	return 0;
2355 
2356 err_close_channels:
2357 	for (i--; i >= 0; i--)
2358 		mlx5e_close_channel(chs->c[i]);
2359 
2360 err_free:
2361 	kfree(chs->c);
2362 	kvfree(cparam);
2363 	chs->num = 0;
2364 	return err;
2365 }
2366 
2367 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2368 {
2369 	int i;
2370 
2371 	for (i = 0; i < chs->num; i++)
2372 		mlx5e_activate_channel(chs->c[i]);
2373 }
2374 
2375 #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
2376 
2377 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2378 {
2379 	int err = 0;
2380 	int i;
2381 
2382 	for (i = 0; i < chs->num; i++) {
2383 		int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
2384 
2385 		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
2386 
2387 		/* Don't wait on the XSK RQ, because the newer xdpsock sample
2388 		 * doesn't provide any Fill Ring entries at the setup stage.
2389 		 */
2390 	}
2391 
2392 	return err ? -ETIMEDOUT : 0;
2393 }
2394 
2395 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2396 {
2397 	int i;
2398 
2399 	for (i = 0; i < chs->num; i++)
2400 		mlx5e_deactivate_channel(chs->c[i]);
2401 }
2402 
2403 void mlx5e_close_channels(struct mlx5e_channels *chs)
2404 {
2405 	int i;
2406 
2407 	for (i = 0; i < chs->num; i++)
2408 		mlx5e_close_channel(chs->c[i]);
2409 
2410 	kfree(chs->c);
2411 	chs->num = 0;
2412 }
2413 
2414 static int
2415 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2416 {
2417 	struct mlx5_core_dev *mdev = priv->mdev;
2418 	void *rqtc;
2419 	int inlen;
2420 	int err;
2421 	u32 *in;
2422 	int i;
2423 
2424 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2425 	in = kvzalloc(inlen, GFP_KERNEL);
2426 	if (!in)
2427 		return -ENOMEM;
2428 
2429 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2430 
2431 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2432 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2433 
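	/* Point every entry at the drop RQ for now; the real RQs are plugged
	 * in later via mlx5e_redirect_rqt().
	 */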
2434 	for (i = 0; i < sz; i++)
2435 		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2436 
2437 	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2438 	if (!err)
2439 		rqt->enabled = true;
2440 
2441 	kvfree(in);
2442 	return err;
2443 }
2444 
2445 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2446 {
2447 	rqt->enabled = false;
2448 	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2449 }
2450 
2451 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2452 {
2453 	struct mlx5e_rqt *rqt = &priv->indir_rqt;
2454 	int err;
2455 
2456 	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2457 	if (err)
2458 		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2459 	return err;
2460 }
2461 
2462 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
2463 {
2464 	int err;
2465 	int ix;
2466 
2467 	for (ix = 0; ix < priv->max_nch; ix++) {
2468 		err = mlx5e_create_rqt(priv, 1 /* size */, &tirs[ix].rqt);
2469 		if (unlikely(err))
2470 			goto err_destroy_rqts;
2471 	}
2472 
2473 	return 0;
2474 
2475 err_destroy_rqts:
2476 	mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
2477 	for (ix--; ix >= 0; ix--)
2478 		mlx5e_destroy_rqt(priv, &tirs[ix].rqt);
2479 
2480 	return err;
2481 }
2482 
2483 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
2484 {
2485 	int i;
2486 
2487 	for (i = 0; i < priv->max_nch; i++)
2488 		mlx5e_destroy_rqt(priv, &tirs[i].rqt);
2489 }
2490 
2491 static int mlx5e_rx_hash_fn(int hfunc)
2492 {
2493 	return (hfunc == ETH_RSS_HASH_TOP) ?
2494 	       MLX5_RX_HASH_FN_TOEPLITZ :
2495 	       MLX5_RX_HASH_FN_INVERTED_XOR8;
2496 }
2497 
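/* Reverse the low 'size' bits of 'a', e.g. mlx5e_bits_invert(0b001, 3) is
 * 0b100. Used to walk the RSS indirection table in bit-reversed order when
 * the inverted XOR8 hash function is selected.
 */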
2498 int mlx5e_bits_invert(unsigned long a, int size)
2499 {
2500 	int inv = 0;
2501 	int i;
2502 
2503 	for (i = 0; i < size; i++)
2504 		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2505 
2506 	return inv;
2507 }
2508 
2509 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2510 				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2511 {
2512 	int i;
2513 
2514 	for (i = 0; i < sz; i++) {
2515 		u32 rqn;
2516 
2517 		if (rrp.is_rss) {
2518 			int ix = i;
2519 
2520 			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2521 				ix = mlx5e_bits_invert(i, ilog2(sz));
2522 
2523 			ix = priv->rss_params.indirection_rqt[ix];
2524 			rqn = rrp.rss.channels->c[ix]->rq.rqn;
2525 		} else {
2526 			rqn = rrp.rqn;
2527 		}
2528 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2529 	}
2530 }
2531 
2532 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2533 		       struct mlx5e_redirect_rqt_param rrp)
2534 {
2535 	struct mlx5_core_dev *mdev = priv->mdev;
2536 	void *rqtc;
2537 	int inlen;
2538 	u32 *in;
2539 	int err;
2540 
2541 	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2542 	in = kvzalloc(inlen, GFP_KERNEL);
2543 	if (!in)
2544 		return -ENOMEM;
2545 
2546 	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2547 
2548 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2549 	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2550 	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2551 	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2552 
2553 	kvfree(in);
2554 	return err;
2555 }
2556 
2557 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2558 				struct mlx5e_redirect_rqt_param rrp)
2559 {
2560 	if (!rrp.is_rss)
2561 		return rrp.rqn;
2562 
2563 	if (ix >= rrp.rss.channels->num)
2564 		return priv->drop_rq.rqn;
2565 
2566 	return rrp.rss.channels->c[ix]->rq.rqn;
2567 }
2568 
2569 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2570 				struct mlx5e_redirect_rqt_param rrp)
2571 {
2572 	u32 rqtn;
2573 	int ix;
2574 
2575 	if (priv->indir_rqt.enabled) {
2576 		/* RSS RQ table */
2577 		rqtn = priv->indir_rqt.rqtn;
2578 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2579 	}
2580 
2581 	for (ix = 0; ix < priv->max_nch; ix++) {
2582 		struct mlx5e_redirect_rqt_param direct_rrp = {
2583 			.is_rss = false,
2584 			{
2585 				.rqn    = mlx5e_get_direct_rqn(priv, ix, rrp)
2586 			},
2587 		};
2588 
2589 		/* Direct RQ Tables */
2590 		if (!priv->direct_tir[ix].rqt.enabled)
2591 			continue;
2592 
2593 		rqtn = priv->direct_tir[ix].rqt.rqtn;
2594 		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2595 	}
2596 }
2597 
2598 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2599 					    struct mlx5e_channels *chs)
2600 {
2601 	struct mlx5e_redirect_rqt_param rrp = {
2602 		.is_rss        = true,
2603 		{
2604 			.rss = {
2605 				.channels  = chs,
2606 				.hfunc     = priv->rss_params.hfunc,
2607 			}
2608 		},
2609 	};
2610 
2611 	mlx5e_redirect_rqts(priv, rrp);
2612 }
2613 
2614 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2615 {
2616 	struct mlx5e_redirect_rqt_param drop_rrp = {
2617 		.is_rss = false,
2618 		{
2619 			.rqn = priv->drop_rq.rqn,
2620 		},
2621 	};
2622 
2623 	mlx5e_redirect_rqts(priv, drop_rrp);
2624 }
2625 
2626 static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
2627 	[MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2628 				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2629 				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2630 	},
2631 	[MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2632 				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2633 				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2634 	},
2635 	[MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2636 				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2637 				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2638 	},
2639 	[MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2640 				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2641 				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2642 	},
2643 	[MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2644 				     .l4_prot_type = 0,
2645 				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2646 	},
2647 	[MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2648 				     .l4_prot_type = 0,
2649 				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2650 	},
2651 	[MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2652 				      .l4_prot_type = 0,
2653 				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2654 	},
2655 	[MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2656 				      .l4_prot_type = 0,
2657 				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2658 	},
2659 	[MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2660 			    .l4_prot_type = 0,
2661 			    .rx_hash_fields = MLX5_HASH_IP,
2662 	},
2663 	[MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2664 			    .l4_prot_type = 0,
2665 			    .rx_hash_fields = MLX5_HASH_IP,
2666 	},
2667 };
2668 
2669 struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
2670 {
2671 	return tirc_default_config[tt];
2672 }
2673 
2674 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2675 {
2676 	if (!params->lro_en)
2677 		return;
2678 
2679 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2680 
2681 	MLX5_SET(tirc, tirc, lro_enable_mask,
2682 		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2683 		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
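	/* lro_max_ip_payload_size is expressed in 256-byte units (hence the
	 * >> 8), after reserving a rough L2/L3 header budget.
	 */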
2684 	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2685 		 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2686 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2687 }
2688 
2689 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
2690 				    const struct mlx5e_tirc_config *ttconfig,
2691 				    void *tirc, bool inner)
2692 {
2693 	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2694 			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2695 
2696 	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
2697 	if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
2698 		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2699 					     rx_hash_toeplitz_key);
2700 		size_t len = MLX5_FLD_SZ_BYTES(tirc,
2701 					       rx_hash_toeplitz_key);
2702 
2703 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2704 		memcpy(rss_key, rss_params->toeplitz_hash_key, len);
2705 	}
2706 	MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2707 		 ttconfig->l3_prot_type);
2708 	MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2709 		 ttconfig->l4_prot_type);
2710 	MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2711 		 ttconfig->rx_hash_fields);
2712 }
2713 
2714 static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
2715 					enum mlx5e_traffic_types tt,
2716 					u32 rx_hash_fields)
2717 {
2718 	*ttconfig                = tirc_default_config[tt];
2719 	ttconfig->rx_hash_fields = rx_hash_fields;
2720 }
2721 
2722 void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
2723 {
2724 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2725 	struct mlx5e_rss_params *rss = &priv->rss_params;
2726 	struct mlx5_core_dev *mdev = priv->mdev;
2727 	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
2728 	struct mlx5e_tirc_config ttconfig;
2729 	int tt;
2730 
2731 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
2732 
2733 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2734 		memset(tirc, 0, ctxlen);
2735 		mlx5e_update_rx_hash_fields(&ttconfig, tt,
2736 					    rss->rx_hash_fields[tt]);
2737 		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
2738 		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
2739 	}
2740 
2741 	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2742 		return;
2743 
2744 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2745 		memset(tirc, 0, ctxlen);
2746 		mlx5e_update_rx_hash_fields(&ttconfig, tt,
2747 					    rss->rx_hash_fields[tt]);
2748 		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
2749 		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
2750 				     inlen);
2751 	}
2752 }
2753 
2754 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2755 {
2756 	struct mlx5_core_dev *mdev = priv->mdev;
2757 
2758 	void *in;
2759 	void *tirc;
2760 	int inlen;
2761 	int err;
2762 	int tt;
2763 	int ix;
2764 
2765 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2766 	in = kvzalloc(inlen, GFP_KERNEL);
2767 	if (!in)
2768 		return -ENOMEM;
2769 
2770 	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2771 	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2772 
2773 	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2774 
2775 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2776 		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2777 					   inlen);
2778 		if (err)
2779 			goto free_in;
2780 	}
2781 
2782 	for (ix = 0; ix < priv->max_nch; ix++) {
2783 		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2784 					   in, inlen);
2785 		if (err)
2786 			goto free_in;
2787 	}
2788 
2789 free_in:
2790 	kvfree(in);
2791 
2792 	return err;
2793 }
2794 
2795 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2796 			 struct mlx5e_params *params, u16 mtu)
2797 {
2798 	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2799 	int err;
2800 
2801 	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2802 	if (err)
2803 		return err;
2804 
2805 	/* Update vport context MTU */
2806 	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2807 	return 0;
2808 }
2809 
2810 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2811 			    struct mlx5e_params *params, u16 *mtu)
2812 {
2813 	u16 hw_mtu = 0;
2814 	int err;
2815 
2816 	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2817 	if (err || !hw_mtu) /* fallback to port oper mtu */
2818 		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2819 
2820 	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2821 }
2822 
2823 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2824 {
2825 	struct mlx5e_params *params = &priv->channels.params;
2826 	struct net_device *netdev = priv->netdev;
2827 	struct mlx5_core_dev *mdev = priv->mdev;
2828 	u16 mtu;
2829 	int err;
2830 
2831 	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2832 	if (err)
2833 		return err;
2834 
2835 	mlx5e_query_mtu(mdev, params, &mtu);
2836 	if (mtu != params->sw_mtu)
2837 		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2838 			    __func__, mtu, params->sw_mtu);
2839 
2840 	params->sw_mtu = mtu;
2841 	return 0;
2842 }
2843 
2844 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
2845 {
2846 	struct mlx5e_params *params = &priv->channels.params;
2847 	struct net_device *netdev   = priv->netdev;
2848 	struct mlx5_core_dev *mdev  = priv->mdev;
2849 	u16 max_mtu;
2850 
2851 	/* MTU range: 68 - hw-specific max */
2852 	netdev->min_mtu = ETH_MIN_MTU;
2853 
2854 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2855 	netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
2856 				ETH_MAX_MTU);
2857 }
2858 
2859 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2860 {
2861 	struct mlx5e_priv *priv = netdev_priv(netdev);
2862 	int nch = priv->channels.params.num_channels;
2863 	int ntc = priv->channels.params.num_tc;
2864 	int tc;
2865 
2866 	netdev_reset_tc(netdev);
2867 
2868 	if (ntc == 1)
2869 		return;
2870 
2871 	netdev_set_num_tc(netdev, ntc);
2872 
2873 	/* Map all netdev TCs to queue offset 0;
2874 	 * we have our own UP-to-TXQ mapping for QoS.
2875 	 */
2876 	for (tc = 0; tc < ntc; tc++)
2877 		netdev_set_tc_queue(netdev, tc, nch, 0);
2878 }
2879 
2880 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
2881 {
2882 	int i, ch;
2883 
2884 	ch = priv->channels.num;
2885 
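	/* txq_ix = ch_ix + tc * num_channels (see mlx5e_open_sqs), so the
	 * queues of each TC form one contiguous block of 'ch' entries.
	 */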
2886 	for (i = 0; i < ch; i++) {
2887 		int tc;
2888 
2889 		for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
2890 			struct mlx5e_channel *c = priv->channels.c[i];
2891 			struct mlx5e_txqsq *sq = &c->sq[tc];
2892 
2893 			priv->txq2sq[sq->txq_ix] = sq;
2894 			priv->channel_tc2realtxq[i][tc] = i + tc * ch;
2895 		}
2896 	}
2897 }
2898 
2899 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2900 {
2901 	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2902 	int num_rxqs = priv->channels.num * priv->profile->rq_groups;
2903 	struct net_device *netdev = priv->netdev;
2904 
2905 	mlx5e_netdev_set_tcs(netdev);
2906 	netif_set_real_num_tx_queues(netdev, num_txqs);
2907 	netif_set_real_num_rx_queues(netdev, num_rxqs);
2908 
2909 	mlx5e_build_txq_maps(priv);
2910 	mlx5e_activate_channels(&priv->channels);
2911 	mlx5e_xdp_tx_enable(priv);
2912 	netif_tx_start_all_queues(priv->netdev);
2913 
2914 	if (mlx5e_is_vport_rep(priv))
2915 		mlx5e_add_sqs_fwd_rules(priv);
2916 
2917 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2918 	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2919 
2920 	mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
2921 }
2922 
2923 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2924 {
2925 	mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
2926 
2927 	mlx5e_redirect_rqts_to_drop(priv);
2928 
2929 	if (mlx5e_is_vport_rep(priv))
2930 		mlx5e_remove_sqs_fwd_rules(priv);
2931 
2932 	/* FIXME: This is a workaround only for false tx timeout watchdog
2933 	 * alarms raised while polling for inactive tx queues.
2934 	 */
2935 	netif_tx_stop_all_queues(priv->netdev);
2936 	netif_tx_disable(priv->netdev);
2937 	mlx5e_xdp_tx_disable(priv);
2938 	mlx5e_deactivate_channels(&priv->channels);
2939 }
2940 
2941 static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2942 				       struct mlx5e_channels *new_chs,
2943 				       mlx5e_fp_hw_modify hw_modify)
2944 {
2945 	struct net_device *netdev = priv->netdev;
2946 	int new_num_txqs;
2947 	int carrier_ok;
2948 
2949 	new_num_txqs = new_chs->num * new_chs->params.num_tc;
2950 
2951 	carrier_ok = netif_carrier_ok(netdev);
2952 	netif_carrier_off(netdev);
2953 
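	/* Shrink the TX queue count before deactivating so the stack stops
	 * selecting queues that are about to disappear.
	 */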
2954 	if (new_num_txqs < netdev->real_num_tx_queues)
2955 		netif_set_real_num_tx_queues(netdev, new_num_txqs);
2956 
2957 	mlx5e_deactivate_priv_channels(priv);
2958 	mlx5e_close_channels(&priv->channels);
2959 
2960 	priv->channels = *new_chs;
2961 
2962 	/* New channels are ready to roll, modify HW settings if needed */
2963 	if (hw_modify)
2964 		hw_modify(priv);
2965 
2966 	priv->profile->update_rx(priv);
2967 	mlx5e_activate_priv_channels(priv);
2968 
2969 	/* restore the carrier if needed */
2970 	if (carrier_ok)
2971 		netif_carrier_on(netdev);
2972 }
2973 
2974 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
2975 			       struct mlx5e_channels *new_chs,
2976 			       mlx5e_fp_hw_modify hw_modify)
2977 {
2978 	int err;
2979 
2980 	err = mlx5e_open_channels(priv, new_chs);
2981 	if (err)
2982 		return err;
2983 
2984 	mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
2985 	return 0;
2986 }
2987 
2988 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
2989 {
2990 	struct mlx5e_channels new_channels = {};
2991 
2992 	new_channels.params = priv->channels.params;
2993 	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
2994 }
2995 
2996 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2997 {
2998 	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
2999 	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
3000 }
3001 
3002 int mlx5e_open_locked(struct net_device *netdev)
3003 {
3004 	struct mlx5e_priv *priv = netdev_priv(netdev);
3005 	int err;
3006 
3007 	set_bit(MLX5E_STATE_OPENED, &priv->state);
3008 
3009 	err = mlx5e_open_channels(priv, &priv->channels);
3010 	if (err)
3011 		goto err_clear_state_opened_flag;
3012 
3013 	priv->profile->update_rx(priv);
3014 	mlx5e_activate_priv_channels(priv);
3015 	if (priv->profile->update_carrier)
3016 		priv->profile->update_carrier(priv);
3017 
3018 	mlx5e_queue_update_stats(priv);
3019 	return 0;
3020 
3021 err_clear_state_opened_flag:
3022 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
3023 	return err;
3024 }
3025 
3026 int mlx5e_open(struct net_device *netdev)
3027 {
3028 	struct mlx5e_priv *priv = netdev_priv(netdev);
3029 	int err;
3030 
3031 	mutex_lock(&priv->state_lock);
3032 	err = mlx5e_open_locked(netdev);
3033 	if (!err)
3034 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
3035 	mutex_unlock(&priv->state_lock);
3036 
3037 	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
3038 		udp_tunnel_get_rx_info(netdev);
3039 
3040 	return err;
3041 }
3042 
3043 int mlx5e_close_locked(struct net_device *netdev)
3044 {
3045 	struct mlx5e_priv *priv = netdev_priv(netdev);
3046 
3047 	/* May already be CLOSED if a previous configuration operation that
3048 	 * involves close & open (e.g. an RX/TX queue size change) failed.
3049 	 */
3050 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3051 		return 0;
3052 
3053 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
3054 
3055 	netif_carrier_off(priv->netdev);
3056 	mlx5e_deactivate_priv_channels(priv);
3057 	mlx5e_close_channels(&priv->channels);
3058 
3059 	return 0;
3060 }
3061 
3062 int mlx5e_close(struct net_device *netdev)
3063 {
3064 	struct mlx5e_priv *priv = netdev_priv(netdev);
3065 	int err;
3066 
3067 	if (!netif_device_present(netdev))
3068 		return -ENODEV;
3069 
3070 	mutex_lock(&priv->state_lock);
3071 	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
3072 	err = mlx5e_close_locked(netdev);
3073 	mutex_unlock(&priv->state_lock);
3074 
3075 	return err;
3076 }
3077 
3078 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3079 			       struct mlx5e_rq *rq,
3080 			       struct mlx5e_rq_param *param)
3081 {
3082 	void *rqc = param->rqc;
3083 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
3084 	int err;
3085 
3086 	param->wq.db_numa_node = param->wq.buf_numa_node;
3087 
3088 	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3089 				 &rq->wq_ctrl);
3090 	if (err)
3091 		return err;
3092 
3093 	/* Mark as unused, since packets on the "Drop-RQ" never reach XDP */
3094 	xdp_rxq_info_unused(&rq->xdp_rxq);
3095 
3096 	rq->mdev = mdev;
3097 
3098 	return 0;
3099 }
3100 
3101 static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
3102 			       struct mlx5e_cq *cq,
3103 			       struct mlx5e_cq_param *param)
3104 {
3105 	param->wq.buf_numa_node = dev_to_node(mdev->device);
3106 	param->wq.db_numa_node  = dev_to_node(mdev->device);
3107 
3108 	return mlx5e_alloc_cq_common(mdev, param, cq);
3109 }
3110 
3111 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3112 		       struct mlx5e_rq *drop_rq)
3113 {
3114 	struct mlx5_core_dev *mdev = priv->mdev;
3115 	struct mlx5e_cq_param cq_param = {};
3116 	struct mlx5e_rq_param rq_param = {};
3117 	struct mlx5e_cq *cq = &drop_rq->cq;
3118 	int err;
3119 
3120 	mlx5e_build_drop_rq_param(priv, &rq_param);
3121 
3122 	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
3123 	if (err)
3124 		return err;
3125 
3126 	err = mlx5e_create_cq(cq, &cq_param);
3127 	if (err)
3128 		goto err_free_cq;
3129 
3130 	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3131 	if (err)
3132 		goto err_destroy_cq;
3133 
3134 	err = mlx5e_create_rq(drop_rq, &rq_param);
3135 	if (err)
3136 		goto err_free_rq;
3137 
3138 	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3139 	if (err)
3140 		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3141 
3142 	return 0;
3143 
3144 err_free_rq:
3145 	mlx5e_free_rq(drop_rq);
3146 
3147 err_destroy_cq:
3148 	mlx5e_destroy_cq(cq);
3149 
3150 err_free_cq:
3151 	mlx5e_free_cq(cq);
3152 
3153 	return err;
3154 }
3155 
3156 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3157 {
3158 	mlx5e_destroy_rq(drop_rq);
3159 	mlx5e_free_rq(drop_rq);
3160 	mlx5e_destroy_cq(&drop_rq->cq);
3161 	mlx5e_free_cq(&drop_rq->cq);
3162 }
3163 
3164 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
3165 {
3166 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3167 
3168 	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
3169 
3170 	if (MLX5_GET(tisc, tisc, tls_en))
3171 		MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
3172 
3173 	if (mlx5_lag_is_lacp_owner(mdev))
3174 		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3175 
3176 	return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
3177 }
3178 
3179 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
3180 {
3181 	mlx5_core_destroy_tis(mdev, tisn);
3182 }
3183 
3184 void mlx5e_destroy_tises(struct mlx5e_priv *priv)
3185 {
3186 	int tc, i;
3187 
3188 	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
3189 		for (tc = 0; tc < priv->profile->max_tc; tc++)
3190 			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3191 }
3192 
3193 static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
3194 {
3195 	return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
3196 }
3197 
3198 int mlx5e_create_tises(struct mlx5e_priv *priv)
3199 {
3200 	int tc, i;
3201 	int err;
3202 
3203 	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
3204 		for (tc = 0; tc < priv->profile->max_tc; tc++) {
3205 			u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
3206 			void *tisc;
3207 
3208 			tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3209 
3210 			MLX5_SET(tisc, tisc, prio, tc << 1);
3211 
3212 			if (mlx5e_lag_should_assign_affinity(priv->mdev))
3213 				MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
3214 
3215 			err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
3216 			if (err)
3217 				goto err_close_tises;
3218 		}
3219 	}
3220 
3221 	return 0;
3222 
3223 err_close_tises:
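	/* Unwind in reverse: finish the partially-created row 'i' (tc points
	 * one past the last success), then destroy the complete rows below.
	 */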
3224 	for (; i >= 0; i--) {
3225 		for (tc--; tc >= 0; tc--)
3226 			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3227 		tc = priv->profile->max_tc;
3228 	}
3229 
3230 	return err;
3231 }
3232 
3233 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3234 {
3235 	mlx5e_destroy_tises(priv);
3236 }
3237 
3238 static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
3239 					     u32 rqtn, u32 *tirc)
3240 {
3241 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3242 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3243 	MLX5_SET(tirc, tirc, indirect_table, rqtn);
3244 	MLX5_SET(tirc, tirc, tunneled_offload_en,
3245 		 priv->channels.params.tunneled_offload_en);
3246 
3247 	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
3248 }
3249 
3250 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
3251 				      enum mlx5e_traffic_types tt,
3252 				      u32 *tirc)
3253 {
3254 	mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3255 	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3256 				       &tirc_default_config[tt], tirc, false);
3257 }
3258 
3259 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
3260 {
3261 	mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
3262 	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
3263 }
3264 
3265 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
3266 					    enum mlx5e_traffic_types tt,
3267 					    u32 *tirc)
3268 {
3269 	mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3270 	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3271 				       &tirc_default_config[tt], tirc, true);
3272 }
3273 
3274 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
3275 {
3276 	struct mlx5e_tir *tir;
3277 	void *tirc;
3278 	int inlen;
3279 	int i = 0;
3280 	int err;
3281 	u32 *in;
3282 	int tt;
3283 
3284 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3285 	in = kvzalloc(inlen, GFP_KERNEL);
3286 	if (!in)
3287 		return -ENOMEM;
3288 
3289 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
3290 		memset(in, 0, inlen);
3291 		tir = &priv->indir_tir[tt];
3292 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3293 		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
3294 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3295 		if (err) {
3296 			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
3297 			goto err_destroy_inner_tirs;
3298 		}
3299 	}
3300 
3301 	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
3302 		goto out;
3303 
3304 	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
3305 		memset(in, 0, inlen);
3306 		tir = &priv->inner_indir_tir[i];
3307 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3308 		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
3309 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3310 		if (err) {
3311 			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
3312 			goto err_destroy_inner_tirs;
3313 		}
3314 	}
3315 
3316 out:
3317 	kvfree(in);
3318 
3319 	return 0;
3320 
3321 err_destroy_inner_tirs:
3322 	for (i--; i >= 0; i--)
3323 		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3324 
3325 	for (tt--; tt >= 0; tt--)
3326 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3327 
3328 	kvfree(in);
3329 
3330 	return err;
3331 }
3332 
3333 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3334 {
3335 	struct mlx5e_tir *tir;
3336 	void *tirc;
3337 	int inlen;
3338 	int err = 0;
3339 	u32 *in;
3340 	int ix;
3341 
3342 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3343 	in = kvzalloc(inlen, GFP_KERNEL);
3344 	if (!in)
3345 		return -ENOMEM;
3346 
3347 	for (ix = 0; ix < priv->max_nch; ix++) {
3348 		memset(in, 0, inlen);
3349 		tir = &tirs[ix];
3350 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3351 		mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
3352 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3353 		if (unlikely(err))
3354 			goto err_destroy_ch_tirs;
3355 	}
3356 
3357 	goto out;
3358 
3359 err_destroy_ch_tirs:
3360 	mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
3361 	for (ix--; ix >= 0; ix--)
3362 		mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
3363 
3364 out:
3365 	kvfree(in);
3366 
3367 	return err;
3368 }
3369 
3370 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
3371 {
3372 	int i;
3373 
3374 	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3375 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3376 
3377 	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
3378 		return;
3379 
3380 	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3381 		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3382 }
3383 
3384 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3385 {
3386 	int i;
3387 
3388 	for (i = 0; i < priv->max_nch; i++)
3389 		mlx5e_destroy_tir(priv->mdev, &tirs[i]);
3390 }
3391 
3392 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3393 {
3394 	int err = 0;
3395 	int i;
3396 
3397 	for (i = 0; i < chs->num; i++) {
3398 		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3399 		if (err)
3400 			return err;
3401 	}
3402 
3403 	return 0;
3404 }
3405 
3406 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3407 {
3408 	int err = 0;
3409 	int i;
3410 
3411 	for (i = 0; i < chs->num; i++) {
3412 		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3413 		if (err)
3414 			return err;
3415 	}
3416 
3417 	return 0;
3418 }
3419 
3420 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
3421 				 struct tc_mqprio_qopt *mqprio)
3422 {
3423 	struct mlx5e_channels new_channels = {};
3424 	u8 tc = mqprio->num_tc;
3425 	int err = 0;
3426 
3427 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3428 
3429 	if (tc && tc != MLX5E_MAX_NUM_TC)
3430 		return -EINVAL;
3431 
3432 	mutex_lock(&priv->state_lock);
3433 
3434 	new_channels.params = priv->channels.params;
3435 	new_channels.params.num_tc = tc ? tc : 1;
3436 
3437 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3438 		priv->channels.params = new_channels.params;
3439 		goto out;
3440 	}
3441 
3442 	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
3443 	if (err)
3444 		goto out;
3445 
3446 	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3447 				    new_channels.params.num_tc);
3448 out:
3449 	mutex_unlock(&priv->state_lock);
3450 	return err;
3451 }
3452 
3453 #ifdef CONFIG_MLX5_ESWITCH
3454 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
3455 				     struct flow_cls_offload *cls_flower,
3456 				     unsigned long flags)
3457 {
3458 	switch (cls_flower->command) {
3459 	case FLOW_CLS_REPLACE:
3460 		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
3461 					      flags);
3462 	case FLOW_CLS_DESTROY:
3463 		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
3464 					   flags);
3465 	case FLOW_CLS_STATS:
3466 		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
3467 					  flags);
3468 	default:
3469 		return -EOPNOTSUPP;
3470 	}
3471 }
3472 
3473 static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3474 				   void *cb_priv)
3475 {
3476 	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
3477 	struct mlx5e_priv *priv = cb_priv;
3478 
3479 	switch (type) {
3480 	case TC_SETUP_CLSFLOWER:
3481 		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
3482 	default:
3483 		return -EOPNOTSUPP;
3484 	}
3485 }
3486 #endif
3487 
3488 static LIST_HEAD(mlx5e_block_cb_list);
3489 
3490 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3491 			  void *type_data)
3492 {
3493 	struct mlx5e_priv *priv = netdev_priv(dev);
3494 
3495 	switch (type) {
3496 #ifdef CONFIG_MLX5_ESWITCH
3497 	case TC_SETUP_BLOCK: {
3498 		struct flow_block_offload *f = type_data;
3499 
3500 		f->unlocked_driver_cb = true;
3501 		return flow_block_cb_setup_simple(type_data,
3502 						  &mlx5e_block_cb_list,
3503 						  mlx5e_setup_tc_block_cb,
3504 						  priv, priv, true);
3505 	}
3506 #endif
3507 	case TC_SETUP_QDISC_MQPRIO:
3508 		return mlx5e_setup_tc_mqprio(priv, type_data);
3509 	default:
3510 		return -EOPNOTSUPP;
3511 	}
3512 }
3513 
3514 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
3515 {
3516 	int i;
3517 
3518 	for (i = 0; i < priv->max_nch; i++) {
3519 		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
3520 		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
3521 		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3522 		int j;
3523 
3524 		s->rx_packets   += rq_stats->packets + xskrq_stats->packets;
3525 		s->rx_bytes     += rq_stats->bytes + xskrq_stats->bytes;
3526 
3527 		for (j = 0; j < priv->max_opened_tc; j++) {
3528 			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3529 
3530 			s->tx_packets    += sq_stats->packets;
3531 			s->tx_bytes      += sq_stats->bytes;
3532 			s->tx_dropped    += sq_stats->dropped;
3533 		}
3534 	}
3535 }
3536 
3537 void
3538 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3539 {
3540 	struct mlx5e_priv *priv = netdev_priv(dev);
3541 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3542 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3543 
3544 	if (!mlx5e_monitor_counter_supported(priv)) {
3545 		/* update HW stats in background for next time */
3546 		mlx5e_queue_update_stats(priv);
3547 	}
3548 
3549 	if (mlx5e_is_uplink_rep(priv)) {
3550 		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3551 		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
3552 		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3553 		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3554 	} else {
3555 		mlx5e_fold_sw_stats64(priv, stats);
3556 	}
3557 
3558 	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3559 
3560 	stats->rx_length_errors =
3561 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3562 		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3563 		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3564 	stats->rx_crc_errors =
3565 		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3566 	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3567 	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3568 	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3569 			   stats->rx_frame_errors;
3570 	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3571 
3572 	/* vport multicast also counts packets that are dropped due to steering
3573 	 * or rx out of buffer
3574 	 */
3575 	stats->multicast =
3576 		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3577 }
3578 
3579 static void mlx5e_set_rx_mode(struct net_device *dev)
3580 {
3581 	struct mlx5e_priv *priv = netdev_priv(dev);
3582 
3583 	queue_work(priv->wq, &priv->set_rx_mode_work);
3584 }
3585 
3586 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3587 {
3588 	struct mlx5e_priv *priv = netdev_priv(netdev);
3589 	struct sockaddr *saddr = addr;
3590 
3591 	if (!is_valid_ether_addr(saddr->sa_data))
3592 		return -EADDRNOTAVAIL;
3593 
3594 	netif_addr_lock_bh(netdev);
3595 	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3596 	netif_addr_unlock_bh(netdev);
3597 
3598 	queue_work(priv->wq, &priv->set_rx_mode_work);
3599 
3600 	return 0;
3601 }
3602 
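/* Feature toggling follows one pattern: each NETIF_F_* bit is backed by a
 * mlx5e_feature_handler. mlx5e_handle_feature() invokes the handler only
 * when the bit actually changes, and records the new state via
 * MLX5E_SET_FEATURE only on success, so a failed toggle leaves the tracked
 * feature mask describing what is really configured.
 */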
3603 #define MLX5E_SET_FEATURE(features, feature, enable)	\
3604 	do {						\
3605 		if (enable)				\
3606 			*features |= feature;		\
3607 		else					\
3608 			*features &= ~feature;		\
3609 	} while (0)
3610 
3611 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3612 
3613 static int set_feature_lro(struct net_device *netdev, bool enable)
3614 {
3615 	struct mlx5e_priv *priv = netdev_priv(netdev);
3616 	struct mlx5_core_dev *mdev = priv->mdev;
3617 	struct mlx5e_channels new_channels = {};
3618 	struct mlx5e_params *old_params;
3619 	int err = 0;
3620 	bool reset;
3621 
3622 	mutex_lock(&priv->state_lock);
3623 
3624 	if (enable && priv->xsk.refcnt) {
3625 		netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
3626 			    priv->xsk.refcnt);
3627 		err = -EINVAL;
3628 		goto out;
3629 	}
3630 
3631 	old_params = &priv->channels.params;
3632 	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3633 		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
3634 		err = -EINVAL;
3635 		goto out;
3636 	}
3637 
3638 	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
3639 
3640 	new_channels.params = *old_params;
3641 	new_channels.params.lro_en = enable;
3642 
3643 	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
3644 		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
3645 		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
3646 			reset = false;
3647 	}
3648 
3649 	if (!reset) {
3650 		*old_params = new_channels.params;
3651 		err = mlx5e_modify_tirs_lro(priv);
3652 		goto out;
3653 	}
3654 
3655 	err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3656 out:
3657 	mutex_unlock(&priv->state_lock);
3658 	return err;
3659 }
3660 
3661 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3662 {
3663 	struct mlx5e_priv *priv = netdev_priv(netdev);
3664 
3665 	if (enable)
3666 		mlx5e_enable_cvlan_filter(priv);
3667 	else
3668 		mlx5e_disable_cvlan_filter(priv);
3669 
3670 	return 0;
3671 }
3672 
3673 #ifdef CONFIG_MLX5_ESWITCH
3674 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3675 {
3676 	struct mlx5e_priv *priv = netdev_priv(netdev);
3677 
3678 	if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
3679 		netdev_err(netdev,
3680 			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3681 		return -EINVAL;
3682 	}
3683 
3684 	return 0;
3685 }
3686 #endif
3687 
3688 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3689 {
3690 	struct mlx5e_priv *priv = netdev_priv(netdev);
3691 	struct mlx5_core_dev *mdev = priv->mdev;
3692 
3693 	return mlx5_set_port_fcs(mdev, !enable);
3694 }
3695 
3696 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3697 {
3698 	struct mlx5e_priv *priv = netdev_priv(netdev);
3699 	int err;
3700 
3701 	mutex_lock(&priv->state_lock);
3702 
3703 	priv->channels.params.scatter_fcs_en = enable;
3704 	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3705 	if (err)
3706 		priv->channels.params.scatter_fcs_en = !enable;
3707 
3708 	mutex_unlock(&priv->state_lock);
3709 
3710 	return err;
3711 }
3712 
3713 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3714 {
3715 	struct mlx5e_priv *priv = netdev_priv(netdev);
3716 	int err = 0;
3717 
3718 	mutex_lock(&priv->state_lock);
3719 
3720 	priv->channels.params.vlan_strip_disable = !enable;
3721 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3722 		goto unlock;
3723 
3724 	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3725 	if (err)
3726 		priv->channels.params.vlan_strip_disable = enable;
3727 
3728 unlock:
3729 	mutex_unlock(&priv->state_lock);
3730 
3731 	return err;
3732 }
3733 
3734 #ifdef CONFIG_MLX5_EN_ARFS
3735 static int set_feature_arfs(struct net_device *netdev, bool enable)
3736 {
3737 	struct mlx5e_priv *priv = netdev_priv(netdev);
3738 	int err;
3739 
3740 	if (enable)
3741 		err = mlx5e_arfs_enable(priv);
3742 	else
3743 		err = mlx5e_arfs_disable(priv);
3744 
3745 	return err;
3746 }
3747 #endif
3748 
3749 static int mlx5e_handle_feature(struct net_device *netdev,
3750 				netdev_features_t *features,
3751 				netdev_features_t wanted_features,
3752 				netdev_features_t feature,
3753 				mlx5e_feature_handler feature_handler)
3754 {
3755 	netdev_features_t changes = wanted_features ^ netdev->features;
3756 	bool enable = !!(wanted_features & feature);
3757 	int err;
3758 
3759 	if (!(changes & feature))
3760 		return 0;
3761 
3762 	err = feature_handler(netdev, enable);
3763 	if (err) {
3764 		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3765 			   enable ? "Enable" : "Disable", &feature, err);
3766 		return err;
3767 	}
3768 
3769 	MLX5E_SET_FEATURE(features, feature, enable);
3770 	return 0;
3771 }
3772 
3773 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
3774 {
3775 	netdev_features_t oper_features = netdev->features;
3776 	int err = 0;
3777 
3778 #define MLX5E_HANDLE_FEATURE(feature, handler) \
3779 	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
3780 
3781 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3782 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3783 				    set_feature_cvlan_filter);
3784 #ifdef CONFIG_MLX5_ESWITCH
3785 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3786 #endif
3787 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3788 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3789 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3790 #ifdef CONFIG_MLX5_EN_ARFS
3791 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
3792 #endif
3793 
3794 	if (err) {
3795 		netdev->features = oper_features;
3796 		return -EINVAL;
3797 	}
3798 
3799 	return 0;
3800 }
3801 
3802 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3803 					    netdev_features_t features)
3804 {
3805 	struct mlx5e_priv *priv = netdev_priv(netdev);
3806 	struct mlx5e_params *params;
3807 
3808 	mutex_lock(&priv->state_lock);
3809 	params = &priv->channels.params;
3810 	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3811 		/* HW strips the outer C-tag header; this is a problem
3812 		 * for S-tag traffic.
3813 		 */
3814 		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3815 		if (!params->vlan_strip_disable)
3816 			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3817 	}
3818 	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3819 		if (features & NETIF_F_LRO) {
3820 			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
3821 			features &= ~NETIF_F_LRO;
3822 		}
3823 	}
3824 
3825 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
3826 		features &= ~NETIF_F_RXHASH;
3827 		if (netdev->features & NETIF_F_RXHASH)
3828 			netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
3829 	}
3830 
3831 	mutex_unlock(&priv->state_lock);
3832 
3833 	return features;
3834 }
3835 
3836 static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
3837 				   struct mlx5e_channels *chs,
3838 				   struct mlx5e_params *new_params,
3839 				   struct mlx5_core_dev *mdev)
3840 {
3841 	u16 ix;
3842 
3843 	for (ix = 0; ix < chs->params.num_channels; ix++) {
3844 		struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
3845 		struct mlx5e_xsk_param xsk;
3846 
3847 		if (!umem)
3848 			continue;
3849 
3850 		mlx5e_build_xsk_param(umem, &xsk);
3851 
3852 		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
3853 			u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
3854 			int max_mtu_frame, max_mtu_page, max_mtu;
3855 
3856 			/* Two criteria must be met:
3857 			 * 1. HW MTU + all headrooms <= XSK frame size.
3858 			 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
3859 			 */
3860 			max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
3861 			max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
3862 			max_mtu = min(max_mtu_frame, max_mtu_page);
3863 
3864 			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
3865 				   new_params->sw_mtu, ix, max_mtu);
3866 			return false;
3867 		}
3868 	}
3869 
3870 	return true;
3871 }
3872 
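/* Change the MTU, restarting the channels only when required: with LRO off
 * and the netdev open a reset is the default, but non-linear striding-RQ
 * channels may skip it when the packets-per-WQE count is unchanged. The new
 * MTU is rejected if it violates the XDP linear-SKB requirement or the
 * frame-size limits of any active XSK.
 */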
3873 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3874 		     change_hw_mtu_cb set_mtu_cb)
3875 {
3876 	struct mlx5e_priv *priv = netdev_priv(netdev);
3877 	struct mlx5e_channels new_channels = {};
3878 	struct mlx5e_params *params;
3879 	int err = 0;
3880 	bool reset;
3881 
3882 	mutex_lock(&priv->state_lock);
3883 
3884 	params = &priv->channels.params;
3885 
3886 	reset = !params->lro_en;
3887 	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3888 
3889 	new_channels.params = *params;
3890 	new_channels.params.sw_mtu = new_mtu;
3891 
3892 	if (params->xdp_prog &&
3893 	    !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
3894 		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
3895 			   new_mtu, mlx5e_xdp_max_mtu(params, NULL));
3896 		err = -EINVAL;
3897 		goto out;
3898 	}
3899 
3900 	if (priv->xsk.refcnt &&
3901 	    !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
3902 				    &new_channels.params, priv->mdev)) {
3903 		err = -EINVAL;
3904 		goto out;
3905 	}
3906 
3907 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3908 		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
3909 							      &new_channels.params,
3910 							      NULL);
3911 		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
3912 		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);
3913 
3914 		/* If XSK is active, XSK RQs are linear. */
3915 		is_linear |= priv->xsk.refcnt;
3916 
3917 		/* Always reset in linear mode - hw_mtu is used in data path. */
3918 		reset = reset && (is_linear || (ppw_old != ppw_new));
3919 	}
3920 
3921 	if (!reset) {
3922 		params->sw_mtu = new_mtu;
3923 		if (set_mtu_cb)
3924 			set_mtu_cb(priv);
3925 		netdev->mtu = params->sw_mtu;
3926 		goto out;
3927 	}
3928 
3929 	err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
3930 	if (err)
3931 		goto out;
3932 
3933 	netdev->mtu = new_channels.params.sw_mtu;
3934 
3935 out:
3936 	mutex_unlock(&priv->state_lock);
3937 	return err;
3938 }
3939 
3940 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
3941 {
3942 	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
3943 }
3944 
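/* SIOCSHWTSTAMP handler. Requires a free-running device clock
 * (device_frequency_khz) and a valid PTP clock index. Enabling any RX
 * filter disables CQE compression, since compressed CQEs do not carry
 * per-packet timestamps, and the filter is widened to HWTSTAMP_FILTER_ALL.
 */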
3945 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3946 {
3947 	struct hwtstamp_config config;
3948 	int err;
3949 
3950 	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
3951 	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
3952 		return -EOPNOTSUPP;
3953 
3954 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3955 		return -EFAULT;
3956 
3957 	/* TX HW timestamp */
3958 	switch (config.tx_type) {
3959 	case HWTSTAMP_TX_OFF:
3960 	case HWTSTAMP_TX_ON:
3961 		break;
3962 	default:
3963 		return -ERANGE;
3964 	}
3965 
3966 	mutex_lock(&priv->state_lock);
3967 	/* RX HW timestamp */
3968 	switch (config.rx_filter) {
3969 	case HWTSTAMP_FILTER_NONE:
3970 		/* Reset CQE compression to Admin default */
3971 		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3972 		break;
3973 	case HWTSTAMP_FILTER_ALL:
3974 	case HWTSTAMP_FILTER_SOME:
3975 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3976 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3977 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3978 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3979 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3980 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3981 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3982 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3983 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3984 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
3985 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
3986 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3987 	case HWTSTAMP_FILTER_NTP_ALL:
3988 		/* Disable CQE compression */
3989 		if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
3990 			netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
3991 		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3992 		if (err) {
3993 			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3994 			mutex_unlock(&priv->state_lock);
3995 			return err;
3996 		}
3997 		config.rx_filter = HWTSTAMP_FILTER_ALL;
3998 		break;
3999 	default:
4000 		mutex_unlock(&priv->state_lock);
4001 		return -ERANGE;
4002 	}
4003 
4004 	memcpy(&priv->tstamp, &config, sizeof(config));
4005 	mutex_unlock(&priv->state_lock);
4006 
4007 	/* might need to fix some features */
4008 	netdev_update_features(priv->netdev);
4009 
4010 	return copy_to_user(ifr->ifr_data, &config,
4011 			    sizeof(config)) ? -EFAULT : 0;
4012 }
4013 
4014 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
4015 {
4016 	struct hwtstamp_config *cfg = &priv->tstamp;
4017 
4018 	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
4019 		return -EOPNOTSUPP;
4020 
4021 	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
4022 }
4023 
4024 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4025 {
4026 	struct mlx5e_priv *priv = netdev_priv(dev);
4027 
4028 	switch (cmd) {
4029 	case SIOCSHWTSTAMP:
4030 		return mlx5e_hwstamp_set(priv, ifr);
4031 	case SIOCGHWTSTAMP:
4032 		return mlx5e_hwstamp_get(priv, ifr);
4033 	default:
4034 		return -EOPNOTSUPP;
4035 	}
4036 }
4037 
4038 #ifdef CONFIG_MLX5_ESWITCH
4039 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
4040 {
4041 	struct mlx5e_priv *priv = netdev_priv(dev);
4042 	struct mlx5_core_dev *mdev = priv->mdev;
4043 
4044 	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4045 }
4046 
4047 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
4048 			     __be16 vlan_proto)
4049 {
4050 	struct mlx5e_priv *priv = netdev_priv(dev);
4051 	struct mlx5_core_dev *mdev = priv->mdev;
4052 
4053 	if (vlan_proto != htons(ETH_P_8021Q))
4054 		return -EPROTONOSUPPORT;
4055 
4056 	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
4057 					   vlan, qos);
4058 }
4059 
4060 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
4061 {
4062 	struct mlx5e_priv *priv = netdev_priv(dev);
4063 	struct mlx5_core_dev *mdev = priv->mdev;
4064 
4065 	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4066 }
4067 
4068 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
4069 {
4070 	struct mlx5e_priv *priv = netdev_priv(dev);
4071 	struct mlx5_core_dev *mdev = priv->mdev;
4072 
4073 	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4074 }
4075 
4076 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
4077 		      int max_tx_rate)
4078 {
4079 	struct mlx5e_priv *priv = netdev_priv(dev);
4080 	struct mlx5_core_dev *mdev = priv->mdev;
4081 
4082 	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
4083 					   max_tx_rate, min_tx_rate);
4084 }
4085 
4086 static int mlx5_vport_link2ifla(u8 esw_link)
4087 {
4088 	switch (esw_link) {
4089 	case MLX5_VPORT_ADMIN_STATE_DOWN:
4090 		return IFLA_VF_LINK_STATE_DISABLE;
4091 	case MLX5_VPORT_ADMIN_STATE_UP:
4092 		return IFLA_VF_LINK_STATE_ENABLE;
4093 	}
4094 	return IFLA_VF_LINK_STATE_AUTO;
4095 }
4096 
4097 static int mlx5_ifla_link2vport(u8 ifla_link)
4098 {
4099 	switch (ifla_link) {
4100 	case IFLA_VF_LINK_STATE_DISABLE:
4101 		return MLX5_VPORT_ADMIN_STATE_DOWN;
4102 	case IFLA_VF_LINK_STATE_ENABLE:
4103 		return MLX5_VPORT_ADMIN_STATE_UP;
4104 	}
4105 	return MLX5_VPORT_ADMIN_STATE_AUTO;
4106 }
4107 
4108 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4109 				   int link_state)
4110 {
4111 	struct mlx5e_priv *priv = netdev_priv(dev);
4112 	struct mlx5_core_dev *mdev = priv->mdev;
4113 
4114 	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4115 					    mlx5_ifla_link2vport(link_state));
4116 }
4117 
4118 int mlx5e_get_vf_config(struct net_device *dev,
4119 			int vf, struct ifla_vf_info *ivi)
4120 {
4121 	struct mlx5e_priv *priv = netdev_priv(dev);
4122 	struct mlx5_core_dev *mdev = priv->mdev;
4123 	int err;
4124 
4125 	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
4126 	if (err)
4127 		return err;
4128 	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
4129 	return 0;
4130 }
4131 
4132 int mlx5e_get_vf_stats(struct net_device *dev,
4133 		       int vf, struct ifla_vf_stats *vf_stats)
4134 {
4135 	struct mlx5e_priv *priv = netdev_priv(dev);
4136 	struct mlx5_core_dev *mdev = priv->mdev;
4137 
4138 	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
4139 					    vf_stats);
4140 }
4141 #endif
4142 
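/* VXLAN UDP port add/del is deferred to the driver workqueue: the
 * ndo_udp_tunnel_add/del callbacks may run in atomic context, while
 * programming the port in firmware can sleep. Hence the GFP_ATOMIC
 * allocation of the work item below.
 */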
4143 struct mlx5e_vxlan_work {
4144 	struct work_struct	work;
4145 	struct mlx5e_priv	*priv;
4146 	u16			port;
4147 };
4148 
4149 static void mlx5e_vxlan_add_work(struct work_struct *work)
4150 {
4151 	struct mlx5e_vxlan_work *vxlan_work =
4152 		container_of(work, struct mlx5e_vxlan_work, work);
4153 	struct mlx5e_priv *priv = vxlan_work->priv;
4154 	u16 port = vxlan_work->port;
4155 
4156 	mutex_lock(&priv->state_lock);
4157 	mlx5_vxlan_add_port(priv->mdev->vxlan, port);
4158 	mutex_unlock(&priv->state_lock);
4159 
4160 	kfree(vxlan_work);
4161 }
4162 
4163 static void mlx5e_vxlan_del_work(struct work_struct *work)
4164 {
4165 	struct mlx5e_vxlan_work *vxlan_work =
4166 		container_of(work, struct mlx5e_vxlan_work, work);
4167 	struct mlx5e_priv *priv = vxlan_work->priv;
4168 	u16 port = vxlan_work->port;
4169 
4170 	mutex_lock(&priv->state_lock);
4171 	mlx5_vxlan_del_port(priv->mdev->vxlan, port);
4172 	mutex_unlock(&priv->state_lock);
4173 	kfree(vxlan_work);
4174 }
4175 
4176 static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
4177 {
4178 	struct mlx5e_vxlan_work *vxlan_work;
4179 
4180 	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
4181 	if (!vxlan_work)
4182 		return;
4183 
4184 	if (add)
4185 		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
4186 	else
4187 		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
4188 
4189 	vxlan_work->priv = priv;
4190 	vxlan_work->port = port;
4191 	queue_work(priv->wq, &vxlan_work->work);
4192 }
4193 
4194 void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
4195 {
4196 	struct mlx5e_priv *priv = netdev_priv(netdev);
4197 
4198 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4199 		return;
4200 
4201 	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
4202 		return;
4203 
4204 	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
4205 }
4206 
4207 void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
4208 {
4209 	struct mlx5e_priv *priv = netdev_priv(netdev);
4210 
4211 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4212 		return;
4213 
4214 	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
4215 		return;
4216 
4217 	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
4218 }
4219 
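/* Per-skb offload check for encapsulated traffic: keep checksum/GSO
 * features only when HW can offload the tunnel - GRE always, IP-in-IP when
 * the device supports it, and UDP tunnels only when the destination port is
 * a known VXLAN port (or the default GENEVE port). Anything else falls back
 * to software by clearing NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK.
 */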
4220 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4221 						     struct sk_buff *skb,
4222 						     netdev_features_t features)
4223 {
4224 	unsigned int offset = 0;
4225 	struct udphdr *udph;
4226 	u8 proto;
4227 	u16 port;
4228 
4229 	switch (vlan_get_protocol(skb)) {
4230 	case htons(ETH_P_IP):
4231 		proto = ip_hdr(skb)->protocol;
4232 		break;
4233 	case htons(ETH_P_IPV6):
4234 		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
4235 		break;
4236 	default:
4237 		goto out;
4238 	}
4239 
4240 	switch (proto) {
4241 	case IPPROTO_GRE:
4242 		return features;
4243 	case IPPROTO_IPIP:
4244 	case IPPROTO_IPV6:
4245 		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
4246 			return features;
4247 		break;
4248 	case IPPROTO_UDP:
4249 		udph = udp_hdr(skb);
4250 		port = be16_to_cpu(udph->dest);
4251 
4252 		/* Verify if UDP port is being offloaded by HW */
4253 		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
4254 			return features;
4255 
4256 #if IS_ENABLED(CONFIG_GENEVE)
4257 		/* Support Geneve offload for default UDP port */
4258 		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
4259 			return features;
4260 #endif
4261 	}
4262 
4263 out:
4264 	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
4265 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4266 }
4267 
4268 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4269 				       struct net_device *netdev,
4270 				       netdev_features_t features)
4271 {
4272 	struct mlx5e_priv *priv = netdev_priv(netdev);
4273 
4274 	features = vlan_features_check(skb, features);
4275 	features = vxlan_features_check(skb, features);
4276 
4277 #ifdef CONFIG_MLX5_EN_IPSEC
4278 	if (mlx5e_ipsec_feature_check(skb, netdev, features))
4279 		return features;
4280 #endif
4281 
4282 	/* Validate if the tunneled packet is being offloaded by HW */
4283 	if (skb->encapsulation &&
4284 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
4285 		return mlx5e_tunnel_features_check(priv, skb, features);
4286 
4287 	return features;
4288 }
4289 
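/* TX timeout recovery, deferred to a work item because it needs the RTNL
 * and state locks. Each stopped TX queue is handed to the devlink TX
 * reporter first; only if reporting fails are all channels reopened as a
 * last resort.
 */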
4290 static void mlx5e_tx_timeout_work(struct work_struct *work)
4291 {
4292 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4293 					       tx_timeout_work);
4294 	bool report_failed = false;
4295 	int err;
4296 	int i;
4297 
4298 	rtnl_lock();
4299 	mutex_lock(&priv->state_lock);
4300 
4301 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4302 		goto unlock;
4303 
4304 	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
4305 		struct netdev_queue *dev_queue =
4306 			netdev_get_tx_queue(priv->netdev, i);
4307 		struct mlx5e_txqsq *sq = priv->txq2sq[i];
4308 
4309 		if (!netif_xmit_stopped(dev_queue))
4310 			continue;
4311 
4312 		if (mlx5e_reporter_tx_timeout(sq))
4313 			report_failed = true;
4314 	}
4315 
4316 	if (!report_failed)
4317 		goto unlock;
4318 
4319 	err = mlx5e_safe_reopen_channels(priv);
4320 	if (err)
4321 		netdev_err(priv->netdev,
4322 			   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
4323 			   err);
4324 
4325 unlock:
4326 	mutex_unlock(&priv->state_lock);
4327 	rtnl_unlock();
4328 }
4329 
4330 static void mlx5e_tx_timeout(struct net_device *dev)
4331 {
4332 	struct mlx5e_priv *priv = netdev_priv(dev);
4333 
4334 	netdev_err(dev, "TX timeout detected\n");
4335 	queue_work(priv->wq, &priv->tx_timeout_work);
4336 }
4337 
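/* XDP attach preconditions: XDP is mutually exclusive with LRO and with
 * IPSec offload, and the current MTU must allow linear (single-page) RX
 * SKBs.
 */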
4338 static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
4339 {
4340 	struct net_device *netdev = priv->netdev;
4341 	struct mlx5e_channels new_channels = {};
4342 
4343 	if (priv->channels.params.lro_en) {
4344 		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
4345 		return -EINVAL;
4346 	}
4347 
4348 	if (MLX5_IPSEC_DEV(priv->mdev)) {
4349 		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
4350 		return -EINVAL;
4351 	}
4352 
4353 	new_channels.params = priv->channels.params;
4354 	new_channels.params.xdp_prog = prog;
4355 
4356 	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
4357 	 * the XDP program.
4358 	 */
4359 	if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
4360 		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
4361 			    new_channels.params.sw_mtu,
4362 			    mlx5e_xdp_max_mtu(&new_channels.params, NULL));
4363 		return -EINVAL;
4364 	}
4365 
4366 	return 0;
4367 }
4368 
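/* Attach/detach an XDP program. A full channel restart is needed only when
 * toggling between XDP and non-XDP, since the RQ type changes; swapping one
 * program for another keeps the channels and exchanges each RQ's xdp_prog
 * pointer with the RQ disabled and NAPI synchronized, using the batched
 * prog references taken upfront.
 */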
4369 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4370 {
4371 	struct mlx5e_priv *priv = netdev_priv(netdev);
4372 	struct bpf_prog *old_prog;
4373 	bool reset, was_opened;
4374 	int err = 0;
4375 	int i;
4376 
4377 	mutex_lock(&priv->state_lock);
4378 
4379 	if (prog) {
4380 		err = mlx5e_xdp_allowed(priv, prog);
4381 		if (err)
4382 			goto unlock;
4383 	}
4384 
4385 	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
4386 	/* no need for full reset when exchanging programs */
4387 	reset = (!priv->channels.params.xdp_prog || !prog);
4388 
4389 	if (was_opened && !reset) {
4390 		/* num_channels is invariant here, so we can take the
4391 		 * batched reference right upfront.
4392 		 */
4393 		prog = bpf_prog_add(prog, priv->channels.num);
4394 		if (IS_ERR(prog)) {
4395 			err = PTR_ERR(prog);
4396 			goto unlock;
4397 		}
4398 	}
4399 
4400 	if (was_opened && reset) {
4401 		struct mlx5e_channels new_channels = {};
4402 
4403 		new_channels.params = priv->channels.params;
4404 		new_channels.params.xdp_prog = prog;
4405 		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
4406 		old_prog = priv->channels.params.xdp_prog;
4407 
4408 		err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
4409 		if (err)
4410 			goto unlock;
4411 	} else {
4412 		/* exchange programs; the extra prog reference we got from the
4413 		 * caller is kept as long as we don't fail from this point onwards.
4414 		 */
4415 		old_prog = xchg(&priv->channels.params.xdp_prog, prog);
4416 	}
4417 
4418 	if (old_prog)
4419 		bpf_prog_put(old_prog);
4420 
4421 	if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
4422 		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
4423 
4424 	if (!was_opened || reset)
4425 		goto unlock;
4426 
4427 	/* exchanging programs w/o reset, we update ref counts on behalf
4428 	 * of the channels' RQs here.
4429 	 */
4430 	for (i = 0; i < priv->channels.num; i++) {
4431 		struct mlx5e_channel *c = priv->channels.c[i];
4432 		bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
4433 
4434 		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
4435 		if (xsk_open)
4436 			clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
4437 		napi_synchronize(&c->napi);
4438 		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
4439 
4440 		old_prog = xchg(&c->rq.xdp_prog, prog);
4441 		if (old_prog)
4442 			bpf_prog_put(old_prog);
4443 
4444 		if (xsk_open) {
4445 			old_prog = xchg(&c->xskrq.xdp_prog, prog);
4446 			if (old_prog)
4447 				bpf_prog_put(old_prog);
4448 		}
4449 
4450 		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
4451 		if (xsk_open)
4452 			set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
4453 		/* napi_schedule in case we have missed anything */
4454 		napi_schedule(&c->napi);
4455 	}
4456 
4457 unlock:
4458 	mutex_unlock(&priv->state_lock);
4459 	return err;
4460 }
4461 
4462 static u32 mlx5e_xdp_query(struct net_device *dev)
4463 {
4464 	struct mlx5e_priv *priv = netdev_priv(dev);
4465 	const struct bpf_prog *xdp_prog;
4466 	u32 prog_id = 0;
4467 
4468 	mutex_lock(&priv->state_lock);
4469 	xdp_prog = priv->channels.params.xdp_prog;
4470 	if (xdp_prog)
4471 		prog_id = xdp_prog->aux->id;
4472 	mutex_unlock(&priv->state_lock);
4473 
4474 	return prog_id;
4475 }
4476 
4477 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4478 {
4479 	switch (xdp->command) {
4480 	case XDP_SETUP_PROG:
4481 		return mlx5e_xdp_set(dev, xdp->prog);
4482 	case XDP_QUERY_PROG:
4483 		xdp->prog_id = mlx5e_xdp_query(dev);
4484 		return 0;
4485 	case XDP_SETUP_XSK_UMEM:
4486 		return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
4487 					    xdp->xsk.queue_id);
4488 	default:
4489 		return -EINVAL;
4490 	}
4491 }
4492 
4493 #ifdef CONFIG_MLX5_ESWITCH
4494 static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4495 				struct net_device *dev, u32 filter_mask,
4496 				int nlflags)
4497 {
4498 	struct mlx5e_priv *priv = netdev_priv(dev);
4499 	struct mlx5_core_dev *mdev = priv->mdev;
4500 	u8 mode, setting;
4501 	int err;
4502 
4503 	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
4504 	if (err)
4505 		return err;
4506 	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
4507 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4508 				       mode,
4509 				       0, 0, nlflags, filter_mask, NULL);
4510 }
4511 
4512 static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4513 				u16 flags, struct netlink_ext_ack *extack)
4514 {
4515 	struct mlx5e_priv *priv = netdev_priv(dev);
4516 	struct mlx5_core_dev *mdev = priv->mdev;
4517 	struct nlattr *attr, *br_spec;
4518 	u16 mode = BRIDGE_MODE_UNDEF;
4519 	u8 setting;
4520 	int rem;
4521 
4522 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4523 	if (!br_spec)
4524 		return -EINVAL;
4525 
4526 	nla_for_each_nested(attr, br_spec, rem) {
4527 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
4528 			continue;
4529 
4530 		if (nla_len(attr) < sizeof(mode))
4531 			return -EINVAL;
4532 
4533 		mode = nla_get_u16(attr);
4534 		if (mode > BRIDGE_MODE_VEPA)
4535 			return -EINVAL;
4536 
4537 		break;
4538 	}
4539 
4540 	if (mode == BRIDGE_MODE_UNDEF)
4541 		return -EINVAL;
4542 
4543 	setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
4544 	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
4545 }
4546 #endif
4547 
4548 const struct net_device_ops mlx5e_netdev_ops = {
4549 	.ndo_open                = mlx5e_open,
4550 	.ndo_stop                = mlx5e_close,
4551 	.ndo_start_xmit          = mlx5e_xmit,
4552 	.ndo_setup_tc            = mlx5e_setup_tc,
4553 	.ndo_select_queue        = mlx5e_select_queue,
4554 	.ndo_get_stats64         = mlx5e_get_stats,
4555 	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
4556 	.ndo_set_mac_address     = mlx5e_set_mac,
4557 	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
4558 	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
4559 	.ndo_set_features        = mlx5e_set_features,
4560 	.ndo_fix_features        = mlx5e_fix_features,
4561 	.ndo_change_mtu          = mlx5e_change_nic_mtu,
4562 	.ndo_do_ioctl            = mlx5e_ioctl,
4563 	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
4564 	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
4565 	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
4566 	.ndo_features_check      = mlx5e_features_check,
4567 	.ndo_tx_timeout          = mlx5e_tx_timeout,
4568 	.ndo_bpf		 = mlx5e_xdp,
4569 	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
4570 	.ndo_xsk_wakeup          = mlx5e_xsk_wakeup,
4571 #ifdef CONFIG_MLX5_EN_ARFS
4572 	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
4573 #endif
4574 #ifdef CONFIG_MLX5_ESWITCH
4575 	.ndo_bridge_setlink      = mlx5e_bridge_setlink,
4576 	.ndo_bridge_getlink      = mlx5e_bridge_getlink,
4577 
4578 	/* SRIOV E-Switch NDOs */
4579 	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
4580 	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
4581 	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
4582 	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
4583 	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
4584 	.ndo_get_vf_config       = mlx5e_get_vf_config,
4585 	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
4586 	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
4587 #endif
4588 };
4589 
4590 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
4591 {
4592 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
4593 		return -EOPNOTSUPP;
4594 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
4595 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
4596 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
4597 	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
4598 	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
4599 	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
4600 	    MLX5_CAP_FLOWTABLE(mdev,
4601 			       flow_table_properties_nic_receive.max_ft_level)
4602 			       < 3) {
4603 		mlx5_core_warn(mdev,
4604 			       "Not creating net device, some required device capabilities are missing\n");
4605 		return -EOPNOTSUPP;
4606 	}
4607 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
4608 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
4609 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
4610 		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
4611 
4612 	return 0;
4613 }
4614 
4615 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
4616 				   int num_channels)
4617 {
4618 	int i;
4619 
4620 	for (i = 0; i < len; i++)
4621 		indirection_rqt[i] = i % num_channels;
4622 }
4623 
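/* Consider the PCI link a bottleneck when the maximum port speed exceeds
 * MLX5E_SLOW_PCI_RATIO times the available PCIe bandwidth. The result is
 * used below when choosing defaults such as CQE compression, striding RQ
 * and LRO.
 */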
4624 static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
4625 {
4626 	u32 link_speed = 0;
4627 	u32 pci_bw = 0;
4628 
4629 	mlx5e_port_max_linkspeed(mdev, &link_speed);
4630 	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
4631 	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
4632 			   link_speed, pci_bw);
4633 
4634 #define MLX5E_SLOW_PCI_RATIO (2)
4635 
4636 	return link_speed && pci_bw &&
4637 		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
4638 }
4639 
4640 static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
4641 {
4642 	struct dim_cq_moder moder;
4643 
4644 	moder.cq_period_mode = cq_period_mode;
4645 	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4646 	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4647 	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4648 		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4649 
4650 	return moder;
4651 }
4652 
4653 static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
4654 {
4655 	struct dim_cq_moder moder;
4656 
4657 	moder.cq_period_mode = cq_period_mode;
4658 	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4659 	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
4660 	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4661 		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4662 
4663 	return moder;
4664 }
4665 
4666 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
4667 {
4668 	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
4669 		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
4670 		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4671 }
4672 
4673 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4674 {
4675 	if (params->tx_dim_enabled) {
4676 		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4677 
4678 		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
4679 	} else {
4680 		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
4681 	}
4682 
4683 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4684 			params->tx_cq_moderation.cq_period_mode ==
4685 				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4686 }
4687 
4688 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4689 {
4690 	if (params->rx_dim_enabled) {
4691 		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4692 
4693 		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
4694 	} else {
4695 		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
4696 	}
4697 
4698 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
4699 			params->rx_cq_moderation.cq_period_mode ==
4700 				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4701 }
4702 
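/* Return the smallest supported LRO timer period that is at least
 * wanted_timeout, or the largest supported period when none is big enough.
 */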
4703 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4704 {
4705 	int i;
4706 
4707 	/* The supported periods are organized in ascending order */
4708 	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4709 		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4710 			break;
4711 
4712 	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4713 }
4714 
4715 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
4716 			   struct mlx5e_params *params)
4717 {
4718 	/* Prefer Striding RQ, unless any of the following holds:
4719 	 * - Striding RQ configuration is not possible/supported.
4720 	 * - Slow PCI heuristic.
4721 	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
4722 	 *
4723 	 * No XSK params: checking the availability of striding RQ in general.
4724 	 */
4725 	if (!slow_pci_heuristic(mdev) &&
4726 	    mlx5e_striding_rq_possible(mdev, params) &&
4727 	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
4728 	     !mlx5e_rx_is_linear_skb(params, NULL)))
4729 		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
4730 	mlx5e_set_rq_type(mdev, params);
4731 	mlx5e_init_rq_type_params(mdev, params);
4732 }
4733 
4734 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
4735 			    u16 num_channels)
4736 {
4737 	enum mlx5e_traffic_types tt;
4738 
4739 	rss_params->hfunc = ETH_RSS_HASH_TOP;
4740 	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
4741 			    sizeof(rss_params->toeplitz_hash_key));
4742 	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
4743 				      MLX5E_INDIR_RQT_SIZE, num_channels);
4744 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
4745 		rss_params->rx_hash_fields[tt] =
4746 			tirc_default_config[tt].rx_hash_fields;
4747 }
4748 
4749 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4750 			    struct mlx5e_xsk *xsk,
4751 			    struct mlx5e_rss_params *rss_params,
4752 			    struct mlx5e_params *params,
4753 			    u16 max_channels, u16 mtu)
4754 {
4755 	u8 rx_cq_period_mode;
4756 
4757 	params->sw_mtu = mtu;
4758 	params->hard_mtu = MLX5E_ETH_HARD_MTU;
4759 	params->num_channels = max_channels;
4760 	params->num_tc       = 1;
4761 
4762 	/* SQ */
4763 	params->log_sq_size = is_kdump_kernel() ?
4764 		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4765 		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
4766 
4767 	/* XDP SQ */
4768 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
4769 			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
4770 
4771 	/* set CQE compression */
4772 	params->rx_cqe_compress_def = false;
4773 	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
4774 	    MLX5_CAP_GEN(mdev, vport_group_manager))
4775 		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
4776 
4777 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4778 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
4779 
4780 	/* RQ */
4781 	mlx5e_build_rq_params(mdev, params);
4782 
4783 	/* HW LRO */
4784 
4785 	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
4786 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
4787 		/* No XSK params: checking the availability of striding RQ in general. */
4788 		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
4789 			params->lro_en = !slow_pci_heuristic(mdev);
4790 	}
4791 	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
4792 
4793 	/* CQ moderation params */
4794 	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
4795 			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4796 			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4797 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4798 	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4799 	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4800 	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
4801 
4802 	/* TX inline */
4803 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
4804 
4805 	/* RSS */
4806 	mlx5e_build_rss_params(rss_params, params->num_channels);
4807 	params->tunneled_offload_en =
4808 		mlx5e_tunnel_inner_ft_supported(mdev);
4809 
4810 	/* AF_XDP */
4811 	params->xsk = xsk;
4812 }
4813 
4814 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4815 {
4816 	struct mlx5e_priv *priv = netdev_priv(netdev);
4817 
4818 	mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
4819 	if (is_zero_ether_addr(netdev->dev_addr) &&
4820 	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4821 		eth_hw_addr_random(netdev);
4822 		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4823 	}
4824 }
4825 
4826 static void mlx5e_build_nic_netdev(struct net_device *netdev)
4827 {
4828 	struct mlx5e_priv *priv = netdev_priv(netdev);
4829 	struct mlx5_core_dev *mdev = priv->mdev;
4830 	bool fcs_supported;
4831 	bool fcs_enabled;
4832 
4833 	SET_NETDEV_DEV(netdev, mdev->device);
4834 
4835 	netdev->netdev_ops = &mlx5e_netdev_ops;
4836 
4837 #ifdef CONFIG_MLX5_CORE_EN_DCB
4838 	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4839 		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
4840 #endif
4841 
4842 	netdev->watchdog_timeo    = 15 * HZ;
4843 
4844 	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;
4845 
4846 	netdev->vlan_features    |= NETIF_F_SG;
4847 	netdev->vlan_features    |= NETIF_F_HW_CSUM;
4848 	netdev->vlan_features    |= NETIF_F_GRO;
4849 	netdev->vlan_features    |= NETIF_F_TSO;
4850 	netdev->vlan_features    |= NETIF_F_TSO6;
4851 	netdev->vlan_features    |= NETIF_F_RXCSUM;
4852 	netdev->vlan_features    |= NETIF_F_RXHASH;
4853 
4854 	netdev->mpls_features    |= NETIF_F_SG;
4855 	netdev->mpls_features    |= NETIF_F_HW_CSUM;
4856 	netdev->mpls_features    |= NETIF_F_TSO;
4857 	netdev->mpls_features    |= NETIF_F_TSO6;
4858 
4859 	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
4860 	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;
4861 
4862 	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
4863 	    mlx5e_check_fragmented_striding_rq_cap(mdev))
4864 		netdev->vlan_features    |= NETIF_F_LRO;
4865 
4866 	netdev->hw_features       = netdev->vlan_features;
4867 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
4868 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
4869 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
4870 	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;
4871 
4872 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
4873 	    mlx5e_any_tunnel_proto_supported(mdev)) {
4874 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
4875 		netdev->hw_enc_features |= NETIF_F_TSO;
4876 		netdev->hw_enc_features |= NETIF_F_TSO6;
4877 		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4878 	}
4879 
4880 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
4881 		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
4882 					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
4883 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4884 					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
4885 		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4886 	}
4887 
4888 	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
4889 		netdev->hw_features     |= NETIF_F_GSO_GRE |
4890 					   NETIF_F_GSO_GRE_CSUM;
4891 		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4892 					   NETIF_F_GSO_GRE_CSUM;
4893 		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4894 						NETIF_F_GSO_GRE_CSUM;
4895 	}
4896 
4897 	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
4898 		netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
4899 				       NETIF_F_GSO_IPXIP6;
4900 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
4901 					   NETIF_F_GSO_IPXIP6;
4902 		netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
4903 						NETIF_F_GSO_IPXIP6;
4904 	}
4905 
4906 	netdev->hw_features	                 |= NETIF_F_GSO_PARTIAL;
4907 	netdev->gso_partial_features             |= NETIF_F_GSO_UDP_L4;
4908 	netdev->hw_features                      |= NETIF_F_GSO_UDP_L4;
4909 	netdev->features                         |= NETIF_F_GSO_UDP_L4;
4910 
4911 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4912 
4913 	if (fcs_supported)
4914 		netdev->hw_features |= NETIF_F_RXALL;
4915 
4916 	if (MLX5_CAP_ETH(mdev, scatter_fcs))
4917 		netdev->hw_features |= NETIF_F_RXFCS;
4918 
4919 	netdev->features          = netdev->hw_features;
4920 	if (!priv->channels.params.lro_en)
4921 		netdev->features  &= ~NETIF_F_LRO;
4922 
4923 	if (fcs_enabled)
4924 		netdev->features  &= ~NETIF_F_RXALL;
4925 
4926 	if (!priv->channels.params.scatter_fcs_en)
4927 		netdev->features  &= ~NETIF_F_RXFCS;
4928 
4929 	/* prefer CQE compression over rxhash */
4930 	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
4931 		netdev->features &= ~NETIF_F_RXHASH;
4932 
4933 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4934 	if (FT_CAP(flow_modify_en) &&
4935 	    FT_CAP(modify_root) &&
4936 	    FT_CAP(identified_miss_table_mode) &&
4937 	    FT_CAP(flow_table_modify)) {
4938 #ifdef CONFIG_MLX5_ESWITCH
4939 		netdev->hw_features      |= NETIF_F_HW_TC;
4940 #endif
4941 #ifdef CONFIG_MLX5_EN_ARFS
4942 		netdev->hw_features	 |= NETIF_F_NTUPLE;
4943 #endif
4944 	}
4945 
4946 	netdev->features         |= NETIF_F_HIGHDMA;
4947 	netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
4948 
4949 	netdev->priv_flags       |= IFF_UNICAST_FLT;
4950 
4951 	mlx5e_set_netdev_dev_addr(netdev);
4952 	mlx5e_ipsec_build_netdev(priv);
4953 	mlx5e_tls_build_netdev(priv);
4954 }
4955 
4956 void mlx5e_create_q_counters(struct mlx5e_priv *priv)
4957 {
4958 	struct mlx5_core_dev *mdev = priv->mdev;
4959 	int err;
4960 
4961 	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4962 	if (err) {
4963 		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4964 		priv->q_counter = 0;
4965 	}
4966 
4967 	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
4968 	if (err) {
4969 		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
4970 		priv->drop_rq_q_counter = 0;
4971 	}
4972 }
4973 
4974 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
4975 {
4976 	if (priv->q_counter)
4977 		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4978 
4979 	if (priv->drop_rq_q_counter)
4980 		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
4981 }
4982 
4983 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
4984 			  struct net_device *netdev,
4985 			  const struct mlx5e_profile *profile,
4986 			  void *ppriv)
4987 {
4988 	struct mlx5e_priv *priv = netdev_priv(netdev);
4989 	struct mlx5e_rss_params *rss = &priv->rss_params;
4990 	int err;
4991 
4992 	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
4993 	if (err)
4994 		return err;
4995 
4996 	mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
4997 			       priv->max_nch, netdev->mtu);
4998 
4999 	mlx5e_timestamp_init(priv);
5000 
5001 	err = mlx5e_ipsec_init(priv);
5002 	if (err)
5003 		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
5004 	err = mlx5e_tls_init(priv);
5005 	if (err)
5006 		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
5007 	mlx5e_build_nic_netdev(netdev);
5008 	mlx5e_health_create_reporters(priv);
5009 
5010 	return 0;
5011 }
5012 
5013 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
5014 {
5015 	mlx5e_health_destroy_reporters(priv);
5016 	mlx5e_tls_cleanup(priv);
5017 	mlx5e_ipsec_cleanup(priv);
5018 	mlx5e_netdev_cleanup(priv->netdev, priv);
5019 }
5020 
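/* Build the NIC RX pipeline bottom-up: q counters, drop RQ, indirect RQT,
 * direct RQTs, indirect TIRs (including inner ones for tunnels), direct
 * TIRs, the XSK RQTs/TIRs, flow steering and finally TC. The error labels
 * unwind in exactly the reverse order.
 */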
5021 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
5022 {
5023 	struct mlx5_core_dev *mdev = priv->mdev;
5024 	int err;
5025 
5026 	mlx5e_create_q_counters(priv);
5027 
5028 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
5029 	if (err) {
5030 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
5031 		goto err_destroy_q_counters;
5032 	}
5033 
5034 	err = mlx5e_create_indirect_rqt(priv);
5035 	if (err)
5036 		goto err_close_drop_rq;
5037 
5038 	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
5039 	if (err)
5040 		goto err_destroy_indirect_rqts;
5041 
5042 	err = mlx5e_create_indirect_tirs(priv, true);
5043 	if (err)
5044 		goto err_destroy_direct_rqts;
5045 
5046 	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
5047 	if (err)
5048 		goto err_destroy_indirect_tirs;
5049 
5050 	err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
5051 	if (unlikely(err))
5052 		goto err_destroy_direct_tirs;
5053 
5054 	err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
5055 	if (unlikely(err))
5056 		goto err_destroy_xsk_rqts;
5057 
5058 	err = mlx5e_create_flow_steering(priv);
5059 	if (err) {
5060 		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
5061 		goto err_destroy_xsk_tirs;
5062 	}
5063 
5064 	err = mlx5e_tc_nic_init(priv);
5065 	if (err)
5066 		goto err_destroy_flow_steering;
5067 
5068 	return 0;
5069 
5070 err_destroy_flow_steering:
5071 	mlx5e_destroy_flow_steering(priv);
5072 err_destroy_xsk_tirs:
5073 	mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
5074 err_destroy_xsk_rqts:
5075 	mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
5076 err_destroy_direct_tirs:
5077 	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
5078 err_destroy_indirect_tirs:
5079 	mlx5e_destroy_indirect_tirs(priv, true);
5080 err_destroy_direct_rqts:
5081 	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
5082 err_destroy_indirect_rqts:
5083 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
5084 err_close_drop_rq:
5085 	mlx5e_close_drop_rq(&priv->drop_rq);
5086 err_destroy_q_counters:
5087 	mlx5e_destroy_q_counters(priv);
5088 	return err;
5089 }
5090 
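/* Teardown mirrors mlx5e_init_nic_rx() in reverse order. */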
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
	mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv, true);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
}

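/* Create the TISes for TX and, when DCB support is compiled in, set up the
 * dcbnl state.
 */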
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}

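/* profile->enable: called once the hardware contexts exist. Syncs the L2
 * address and MTU with the device, joins the LAG, enables async events and
 * monitor counters, and, if the netdev is already registered, reattaches
 * it and reopens it under the RTNL lock.
 */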
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5e_init_l2_addr(priv);

	/* Mark the link as currently not needed by the driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	mlx5e_hv_vhca_stats_create(priv);
	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

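/* profile->disable: undoes mlx5e_nic_enable() in reverse order. */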
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	mlx5e_hv_vhca_stats_destroy(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, false);
}

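/* The default NIC profile: binds the NIC-specific callbacks above to the
 * generic netdev management flow below.
 */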
static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_rx	   = mlx5e_update_nic_rx,
	.update_stats	   = mlx5e_update_ndo_stats,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
	.rq_groups	   = MLX5E_NUM_RQ_GROUPS(XSK),
};

/* mlx5e generic netdev management API (move to en_common.c) */

/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv)
{
	/* priv init */
	priv->mdev        = mdev;
	priv->netdev      = netdev;
	priv->profile     = profile;
	priv->ppriv       = ppriv;
	priv->msglevel    = MLX5E_MSG_LEVEL;
	priv->max_nch     = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
	priv->max_opened_tc = 1;

	mutex_init(&priv->state_lock);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		return -ENOMEM;

	/* netdev init */
	netif_carrier_off(netdev);

#ifdef CONFIG_MLX5_EN_ARFS
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif

	return 0;
}

void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
	destroy_workqueue(priv->wq);
}

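/* Allocate a multiqueue ethernet device sized for nch channels (nch * max_tc
 * TX queues and nch * rq_groups RX queues) and run the profile's init
 * callback on it.
 */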
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       int nch,
				       void *ppriv)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch * profile->rq_groups);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	err = profile->init(mdev, netdev, profile, ppriv);
	if (err) {
		mlx5_core_err(mdev, "failed to init mlx5e profile, %d\n", err);
		goto err_free_netdev;
	}

	return netdev;

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

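/* (Re)create the per-netdev hardware contexts. The maximum number of
 * channels may have changed while detached (e.g. across a reload), so the
 * channel count and the RSS indirection table are clamped first.
 */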
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile;
	int max_nch;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	/* max number of channels may have changed */
	max_nch = mlx5e_get_max_num_channels(priv->mdev);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		priv->channels.params.num_channels = max_nch;
		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
					      MLX5E_INDIR_RQT_SIZE, max_nch);
	}

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}

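/* Tear down the per-netdev hardware contexts. The DESTROYING bit is set
 * before disabling so that async paths stop queueing new work while the
 * workqueue is flushed.
 */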
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	profile->cleanup_tx(priv);
	cancel_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

/* The mlx5e_attach/mlx5e_detach scope should be limited to creating and
 * destroying hardware contexts and connecting them to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
		return;
#endif

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}

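/* Interface .add callback. In eswitch offloads (switchdev) mode no netdev is
 * created here; the vport representors are registered instead and the mdev
 * pointer itself is returned as the interface context (see the vpriv == mdev
 * checks in mlx5e_detach() and mlx5e_remove()).
 */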
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *priv;
	int err;
	int nch;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
		mlx5e_rep_register_vport_reps(mdev);
		return mdev;
	}
#endif

	nch = mlx5e_get_max_num_channels(mdev);
	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		return NULL;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif
	return priv;

err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
	return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
		mlx5e_rep_unregister_vport_reps(mdev);
		return;
	}
#endif
	priv = vpriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
}

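/* Glue between the mlx5 core device lifecycle and the ethernet netdev. */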
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
};

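/* Module-level init: build the one-time lookup tables, then register the
 * interface with the mlx5 core.
 */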
void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}