/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"

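/* Pop the last num_dma entries pushed to the SQ's DMA fifo and unmap each
 * of them; used to unwind mappings when building a WQE fails.
 */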
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
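/* Map the packet's DSCP codepoint to a user priority via the dscp2prio table. */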
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
        int dscp_cp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

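/* Select the TX queue for an skb. When TCs are configured, the stack's pick
 * is normalized to a channel index and combined with the packet's user
 * priority (from DSCP or VLAN PCP) to choose the real txq.
 */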
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev)
{
        int txq_ix = netdev_pick_tx(dev, skb, NULL);
        struct mlx5e_priv *priv = netdev_priv(dev);
        int up = 0;
        int ch_ix;

        if (!netdev_get_num_tc(dev))
                return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
                up = mlx5e_get_dscp_up(priv, skb);
        else
#endif
                if (skb_vlan_tag_present(skb))
                        up = skb_vlan_tag_get_prio(skb);

        /* Normalize any picked txq_ix to [0, num_channels),
         * so we can return a txq_ix that matches the channel and
         * packet UP.
         */
        ch_ix = priv->txq2sq[txq_ix]->ch_ix;

        return priv->channel_tc2realtxq[ch_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else
                return mlx5e_skb_l2_header_offset(skb);
}

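/* Return the number of header bytes that must be inlined into the WQE for
 * the given inline mode, capped at the skb's linear length.
 */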
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                        struct sk_buff *skb)
{
        u16 hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
        case MLX5_INLINE_MODE_IP:
                hlen = mlx5e_skb_l3_header_offset(skb);
                break;
        case MLX5_INLINE_MODE_L2:
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
        return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, skb->data, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

/* If the packet is not CHECKSUM_PARTIAL (e.g. an ICMP packet), the L3
 * checksum flag still needs to be set for IPsec.
 */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5_wqe_eth_seg *eseg)
{
        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
        if (skb->encapsulation) {
                eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
                sq->stats->csum_partial_inner++;
        } else {
                sq->stats->csum_partial++;
        }
}

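/* Set the checksum-offload flags in the eth segment based on the skb's
 * ip_summed state, encapsulation, and the accel (TLS/IPsec) context.
 */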
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5e_accel_tx_state *accel,
                            struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats->csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats->csum_partial++;
                }
#ifdef CONFIG_MLX5_EN_TLS
        } else if (unlikely(accel && accel->tls.tls_tisn)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
                sq->stats->csum_partial++;
#endif
        } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
                ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
        } else {
                sq->stats->csum_none++;
        }
}

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        u16 ihs;

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                stats->tso_inner_packets++;
                stats->tso_inner_bytes += skb->len - ihs;
        } else {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                        ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
                else
                        ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                stats->tso_packets++;
                stats->tso_bytes += skb->len - ihs;
        }

        return ihs;
}

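/* DMA-map the skb's linear part (if any) and all page fragments, filling a
 * data segment per mapping. Returns the number of mappings pushed to the
 * DMA fifo, or -ENOMEM after unwinding on a mapping error.
 */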
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;

dma_unmap_wqe_err:
        mlx5e_dma_unmap_wqe_err(sq, num_dma);
        return -ENOMEM;
}

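/* Per-packet transmit attributes derived from the skb, and the WQE layout
 * (data segment counts and WQEBBs) computed from them.
 */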
struct mlx5e_tx_attr {
        u32 num_bytes;
        u16 headlen;
        u16 ihs;
        __be16 mss;
        u16 insz;
        u8 opcode;
};

struct mlx5e_tx_wqe_attr {
        u16 ds_cnt;
        u16 ds_cnt_inl;
        u16 ds_cnt_ids;
        u8 num_wqebbs;
};

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                         struct mlx5e_accel_tx_state *accel)
{
        u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
        if (accel && accel->tls.tls_tisn)
                return MLX5_INLINE_MODE_TCP_UDP;
#endif

        mode = sq->min_inline_mode;

        if (skb_vlan_tag_present(skb) &&
            test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
                mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

        return mode;
}

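/* Fill the TX attributes for the skb: opcode, MSS, inline header size,
 * linear headlen and wire byte count, and update packet/byte stats.
 */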
static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                                  struct mlx5e_accel_tx_state *accel,
                                  struct mlx5e_tx_attr *attr)
{
        struct mlx5e_sq_stats *stats = sq->stats;

        if (skb_is_gso(skb)) {
                u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);

                *attr = (struct mlx5e_tx_attr) {
                        .opcode = MLX5_OPCODE_LSO,
                        .mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
                        .ihs = ihs,
                        .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
                        .headlen = skb_headlen(skb) - ihs,
                };

                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
                u16 ihs = mlx5e_calc_min_inline(mode, skb);

                *attr = (struct mlx5e_tx_attr) {
                        .opcode = MLX5_OPCODE_SEND,
                        .mss = cpu_to_be16(0),
                        .ihs = ihs,
                        .num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
                        .headlen = skb_headlen(skb) - ihs,
                };

                stats->packets++;
        }

        attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
        stats->bytes += attr->num_bytes;
}

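/* Translate the TX attributes into a WQE layout: total data segments,
 * segments used for inline headers and accel ids, and the WQEBB count.
 */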
static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
                                   struct mlx5e_tx_wqe_attr *wqe_attr)
{
        u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
        u16 ds_cnt_inl = 0;
        u16 ds_cnt_ids = 0;

        if (attr->insz)
                ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
                                          MLX5_SEND_WQE_DS);

        ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
        if (attr->ihs) {
                u16 inl = attr->ihs - INL_HDR_START_SZ;

                if (skb_vlan_tag_present(skb))
                        inl += VLAN_HLEN;

                ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        *wqe_attr = (struct mlx5e_tx_wqe_attr) {
                .ds_cnt = ds_cnt,
                .ds_cnt_inl = ds_cnt_inl,
                .ds_cnt_ids = ds_cnt_ids,
                .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
        };
}

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
        if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats->stopped++;
        }
}

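/* Finalize the WQE: record it in the wqe_info ring, fill the control
 * segment, advance the producer counter, stop the queue if it is about to
 * run out of room, and ring the doorbell when the stack asks for it.
 */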
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     const struct mlx5e_tx_attr *attr,
                     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
                     bool xmit_more)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        bool send_doorbell;

        *wi = (struct mlx5e_tx_wqe_info) {
                .skb = skb,
                .num_bytes = attr->num_bytes,
                .num_dma = num_dma,
                .num_wqebbs = wqe_attr->num_wqebbs,
                .num_fifo_pkts = 0,
        };

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

        mlx5e_tx_skb_update_hwts_flags(skb);

        sq->pc += wi->num_wqebbs;

        mlx5e_tx_check_stop(sq);

        send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
        if (send_doorbell)
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

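/* Build and post a regular (non-MPWQE) send WQE: inline headers or HW VLAN
 * insertion in the eth segment, then data segments for the rest of the skb.
 */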
static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
                  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        int num_dma;

        stats->xmit_more += xmit_more;

        /* fill wqe */
        wi = &sq->db.wqe_info[pi];
        cseg = &wqe->ctrl;
        eseg = &wqe->eth;
        dseg = wqe->data;

        eseg->mss = attr->mss;

        if (attr->ihs) {
                if (skb_vlan_tag_present(skb)) {
                        eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
                        stats->added_vlan_packets++;
                } else {
                        eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
                        memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
                }
                dseg += wqe_attr->ds_cnt_inl;
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                stats->added_vlan_packets++;
        }

        dseg += wqe_attr->ds_cnt_ids;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
                                          attr->headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

        return;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);
}

static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
        return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
               !attr->insz;
}

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

        /* Assumes the session is already running and has at least one packet. */
        return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

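/* Open a new TX MPWQE session: reserve room for a maximum-size MPWQE,
 * reset the session state, and copy the shared eth segment into the WQE.
 */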
static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
                                         struct mlx5_wqe_eth_seg *eseg)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
        net_prefetchw(wqe->data);

        *session = (struct mlx5e_tx_mpwqe) {
                .wqe = wqe,
                .bytes_count = 0,
                .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
                .pkt_count = 0,
                .inline_on = 0,
        };

        memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

        sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
        return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5_wqe_data_seg *dseg;

        dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

        session->pkt_count++;
        session->bytes_count += txd->len;

        dseg->addr = cpu_to_be64(txd->dma_addr);
        dseg->byte_count = cpu_to_be32(txd->len);
        dseg->lkey = sq->mkey_be;
        session->ds_count++;

        sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        u8 ds_count = session->ds_count;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5e_tx_wqe_info *wi;
        u16 pi;

        cseg = &session->wqe->ctrl;
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

        pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
        wi = &sq->db.wqe_info[pi];
        *wi = (struct mlx5e_tx_wqe_info) {
                .skb = NULL,
                .num_bytes = session->bytes_count,
                .num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
                .num_dma = session->pkt_count,
                .num_fifo_pkts = session->pkt_count,
        };

        sq->pc += wi->num_wqebbs;

        session->wqe = NULL;

        mlx5e_tx_check_stop(sq);

        return cseg;
}

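/* Add a linear, non-VLAN skb to the current MPWQE session, starting or
 * restarting the session as needed, and close the session when it fills up.
 */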
static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5e_xmit_data txd;

        if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
                mlx5e_tx_mpwqe_session_start(sq, eseg);
        } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
                mlx5e_tx_mpwqe_session_complete(sq);
                mlx5e_tx_mpwqe_session_start(sq, eseg);
        }

        sq->stats->xmit_more += xmit_more;

        txd.data = skb->data;
        txd.len = skb->len;

        txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
                goto err_unmap;
        mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);

        mlx5e_skb_fifo_push(sq, skb);

        mlx5e_tx_mpwqe_add_dseg(sq, &txd);

        mlx5e_tx_skb_update_hwts_flags(skb);

        if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
                /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
                cseg = mlx5e_tx_mpwqe_session_complete(sq);

                if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
                        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
        } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
                /* Might stop the queue, but we were asked to ring the doorbell anyway. */
                cseg = mlx5e_tx_mpwqe_session_complete(sq);

                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
        }

        return;

err_unmap:
        mlx5e_dma_unmap_wqe_err(sq, 1);
        sq->stats->dropped++;
        dev_kfree_skb_any(skb);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
        /* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
        if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
                mlx5e_tx_mpwqe_session_complete(sq);
}

static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
                                   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
                                   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
        if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
                return false;

        mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);

        return true;
}

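/* The driver's transmit entry point: run the accel (TLS/IPsec) hooks, then
 * send the skb either through the MPWQE path or as a regular send WQE.
 */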
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_accel_tx_state accel = {};
        struct mlx5e_tx_wqe_attr wqe_attr;
        struct mlx5e_tx_attr attr;
        struct mlx5e_tx_wqe *wqe;
        struct mlx5e_txqsq *sq;
        u16 pi;

        sq = priv->txq2sq[skb_get_queue_mapping(skb)];

        /* May send SKBs and WQEs. */
        if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
                return NETDEV_TX_OK;

        mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

        if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
                if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
                        struct mlx5_wqe_eth_seg eseg = {};

                        if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
                                                             attr.ihs)))
                                return NETDEV_TX_OK;

                        mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
                        return NETDEV_TX_OK;
                }

                mlx5e_tx_mpwqe_ensure_complete(sq);
        }

        mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
        pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);

        /* May update the WQE, but may not post other WQEs. */
        mlx5e_accel_tx_finish(sq, wqe, &accel,
                              (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
        if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
                return NETDEV_TX_OK;

        mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

        return NETDEV_TX_OK;
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
        struct mlx5e_tx_wqe_attr wqe_attr;
        struct mlx5e_tx_attr attr;
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
        mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
        pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
        mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
        mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
                                  u32 *dma_fifo_cc)
{
        int i;

        for (i = 0; i < wi->num_dma; i++) {
                struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

                mlx5e_tx_dma_unmap(sq->pdev, dma);
        }
}

static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                              struct mlx5_cqe64 *cqe, int napi_budget)
{
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct skb_shared_hwtstamps hwts = {};
                u64 ts = get_cqe_ts(cqe);

                hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
                skb_tstamp_tx(skb, &hwts);
        }

        napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
                                          struct mlx5_cqe64 *cqe, int napi_budget)
{
        int i;

        for (i = 0; i < wi->num_fifo_pkts; i++) {
                struct sk_buff *skb = mlx5e_skb_fifo_pop(sq);

                mlx5e_consume_skb(sq, skb, cqe, napi_budget);
        }
}

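/* NAPI TX completion handler: poll up to MLX5E_TX_CQ_POLL_BUDGET CQEs,
 * unmap and free (or timestamp) the completed skbs, report the completions
 * to the stack, and wake the queue if it was stopped and has room again.
 * Returns true if the whole budget was consumed (more work may be pending).
 */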
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        stats = sq->stats;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        i = 0;
        do {
                struct mlx5e_tx_wqe_info *wi;
                u16 wqe_counter;
                bool last_wqe;
                u16 ci;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        last_wqe = (sqcc == wqe_counter);

                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];

                        sqcc += wi->num_wqebbs;

                        if (likely(wi->skb)) {
                                mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
                                mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

                                npkts++;
                                nbytes += wi->num_bytes;
                                continue;
                        }

                        if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
                                                                               &dma_fifo_cc)))
                                continue;

                        if (wi->num_fifo_pkts) {
                                mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
                                mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

                                npkts += wi->num_fifo_pkts;
                                nbytes += wi->num_bytes;
                        }
                } while (!last_wqe);

                if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
                                              &sq->state)) {
                                mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
                                                     (struct mlx5_err_cqe *)cqe);
                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
                                queue_work(cq->channel->priv->wq,
                                           &sq->recover_work);
                        }
                        stats->cqe_err++;
                }

        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        stats->cqes += i;

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
                stats->wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
        int i;

        for (i = 0; i < wi->num_fifo_pkts; i++)
                dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq));
}

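/* Drop every descriptor still outstanding on the SQ: unmap its DMA, free
 * its skb(s), and report the completions to the stack.
 */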
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        u32 dma_fifo_cc, nbytes = 0;
        u16 ci, sqcc, npkts = 0;

        sqcc = sq->cc;
        dma_fifo_cc = sq->dma_fifo_cc;

        while (sqcc != sq->pc) {
                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                wi = &sq->db.wqe_info[ci];

                sqcc += wi->num_wqebbs;

                if (likely(wi->skb)) {
                        mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
                        dev_kfree_skb_any(wi->skb);

                        npkts++;
                        nbytes += wi->num_bytes;
                        continue;
                }

                if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
                        continue;

                if (wi->num_fifo_pkts) {
                        mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
                        mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

                        npkts += wi->num_fifo_pkts;
                        nbytes += wi->num_bytes;
                }
        }

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
                                   const struct mlx5e_tx_attr *attr,
                                   struct mlx5e_tx_wqe_attr *wqe_attr)
{
        u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
        u16 ds_cnt_inl = 0;

        ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

        if (attr->ihs) {
                u16 inl = attr->ihs - INL_HDR_START_SZ;

                ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        *wqe_attr = (struct mlx5e_tx_wqe_attr) {
                .ds_cnt = ds_cnt,
                .ds_cnt_inl = ds_cnt_inl,
                .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
        };
}

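/* IPoIB transmit: like mlx5e_sq_xmit_wqe, but the WQE additionally carries
 * a datagram segment with the address vector, destination QPN and Q_Key.
 */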
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
        struct mlx5e_tx_wqe_attr wqe_attr;
        struct mlx5e_tx_attr attr;
        struct mlx5i_tx_wqe *wqe;

        struct mlx5_wqe_datagram_seg *datagram;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        int num_dma;
        u16 pi;

        mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
        mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

        pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
        wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

        stats->xmit_more += xmit_more;

        /* fill wqe */
        wi = &sq->db.wqe_info[pi];
        cseg = &wqe->ctrl;
        datagram = &wqe->datagram;
        eseg = &wqe->eth;
        dseg = wqe->data;

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

        eseg->mss = attr.mss;

        if (attr.ihs) {
                memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
                eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
                dseg += wqe_attr.ds_cnt_inl;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
                                          attr.headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

        return;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);
}
#endif