// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"

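/* An RQ runs on the XDP path when an XDP program is attached or when it
 * serves an AF_XDP socket (a non-NULL xsk).
 */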
static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

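/* Headroom reserved at the start of a linear RX buffer. AF_XDP sockets
 * dictate their own headroom; otherwise XDP RQs get XDP_PACKET_HEADROOM and
 * regular RQs the driver default, each padded by NET_IP_ALIGN so that the
 * IP header ends up aligned.
 */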
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

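/* The smallest fragment that can hold a whole packet: the headroom plus the
 * MTU as the hardware sees it (MLX5E_SW2HW_MTU adds the L2 overhead that is
 * not counted in the software MTU).
 */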
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

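/* Fragment size used when a packet is received into a linear buffer; see
 * the comments below for how XDP and AF_XDP round this up.
 */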
u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case: it can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it allows us to treat XSK frames like
	 * pages by redirecting alloc and free operations to XSK rings and by
	 * relying on the fact that there are no multiple packets per "page"
	 * (which is a frame). The latter is important, because frames may
	 * come in a random order, and we would have trouble assembling a
	 * real page out of multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}

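/* log2 of the packets per multi-packet WQE: the WQE size over the linear
 * fragment size, both expressed as powers of two.
 */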
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

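/* A received packet can be built into an SKB in place only when LRO is off
 * and a linear fragment fits into a single page.
 */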
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}

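/* MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ is the largest log_wqe_stride_size the
 * firmware field can carry, once the base the field is biased by is added
 * back. Linear MPWQE RX requires the whole fragment to fit into one stride
 * and, on devices without ext_stride_num_range, the resulting number of
 * strides per WQE to be encodable as well.
 */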
#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

	return signed_log_num_strides_param >= 0;
}

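/* log2 of the number of WQEs in the RQ: the requested capacity in frames
 * divided by the packets carried per WQE, clamped from below to the minimum
 * MPWQE RQ size.
 */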
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

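/* Stride size of a multi-packet WQE: the linear fragment size when packets
 * are built in place, the device default otherwise.
 */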
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

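/* The number of strides per WQE follows directly from the WQE and stride
 * sizes.
 */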
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

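/* Headroom actually reserved for an RQ: non-zero only when packets are
 * received into linear buffers, using the linearity check that matches the
 * RQ type (cyclic vs. multi-packet WQE).
 */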
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(params, xsk) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);

	return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}