/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

/* TX */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

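/* True when the cyclic WQ has room for @n more WQEBBs. (cc - pc) modulo the
 * WQ size is the free space, but it wraps to 0 when the queue is completely
 * empty (cc == pc), hence the explicit check.
 */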
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

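/* Return the WQE at producer index @pi, zeroed out to @wqe_size bytes. */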
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

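/* Post a single NOP WQEBB at the current producer position and advance *pc.
 * Used by the *_get_next_pi() helpers below to pad a WQ fragment so that a
 * multi-WQEBB WQE never wraps across a fragment (page) boundary.
 */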
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

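/* Like mlx5e_post_nop(), but also sets the initiator small-fence bit in the
 * control segment.
 */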
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

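/* Return a producer index at which @size contiguous WQEBBs are available.
 * If the current WQ fragment is too short, it is first filled with NOPs
 * (accounted in the nop statistic).
 */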
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

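/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(): the padding NOPs are
 * recorded as MLX5E_ICOSQ_WQE_NOP entries in the ICOSQ wqe_info array.
 */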
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

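/* Request a completion for the posted WQE, publish the new producer counter
 * in the doorbell record, then ring the doorbell by writing the control
 * segment to the UAR.
 */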
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

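/* Re-arm the CQ to request an event on the next completion. */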
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

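/* DMA fifo: per-SQ ring that records the DMA mappings of transmitted buffers
 * so that mlx5e_tx_dma_unmap() can undo them when the TX completion arrives.
 */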
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

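/* SKB fifo: per-SQ ring of skb pointers, pushed at transmit time and popped
 * at completion time (see num_fifo_pkts in struct mlx5e_tx_wqe_info).
 */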
static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i)
{
	return &sq->db.skb_fifo[i & sq->skb_fifo_mask];
}

static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++);

	*skb_item = skb;
}

static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq)
{
	return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++);
}

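/* Undo one DMA mapping previously recorded by mlx5e_dma_push(). */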
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
	return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
}

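/* Reset the RQ's work queue, handling both the striding-RQ (linked-list WQ)
 * and the legacy (cyclic WQ) layouts.
 */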
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

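/* RQ WQ accessors that dispatch on the WQ type (striding vs. legacy). */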
static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

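/* Fill the SW parser (SWP) offsets and flags in the eth segment according to
 * the skb header offsets and the protocols described by @swp_spec.
 */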
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);

	return wqe_size * 2 - 1;
}

#endif