/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
#include "en/txrx.h"

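/*
 * TX/RX acceleration hooks (kTLS, IPsec crypto offload, GENEVE SW parser,
 * UDP GSO) used by the mlx5e datapath. As a rough sketch (see en_tx.c for
 * the authoritative flow), the TX path drives the helpers below in order:
 *
 *	mlx5e_accel_tx_begin()  - may consume the skb; abort TX on false
 *	mlx5e_accel_tx_eseg()   - fill the offload fields of the eth segment
 *	mlx5e_accel_tx_finish() - patch the WQE once it is fully built
 */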
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>

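/* GENEVE TX offload depends on the device's software parser (SWP)
 * capability, which lets the driver hand the HW explicit inner/outer
 * header offsets instead of relying on HW header parsing.
 */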
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}

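/* Fill the software-parser (SWP) offsets in the eth segment for a
 * GENEVE-encapsulated skb so the HW can offload the inner checksum.
 * Anything that is not UDP towards GENEVE_UDP_PORT is left untouched.
 */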
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		return;
	}

	/* Only GENEVE over UDP (dst port GENEVE_UDP_PORT) is offloaded */
	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	/* Describe the tunnelled (inner) headers to the SW parser */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}

#else
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}

#endif /* CONFIG_GENEVE */

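/* UDP GSO: rewrite the UDP header length from the full payload size to
 * the per-segment size (gso_size), so every segment the HW emits carries
 * a correct UDP header.
 */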
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);

	udp_hdr(skb)->len = htons(payload_len);
}

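/* Per-skb accel state, filled in by mlx5e_accel_tx_begin() and consumed
 * later on the same TX path, e.g. by mlx5e_accel_tx_finish().
 */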
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
};

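/* First accel hook on the TX path. Returns false when an offload handler
 * consumed (dropped) the skb, in which case the caller must stop
 * transmitting it.
 */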
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
		/* May send SKBs and WQEs. */
		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
			return false;
	}
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

	return true;
}

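/* True when mlx5e_accel_tx_begin() classified this skb as an IPsec TX
 * flow; always false without CONFIG_MLX5_EN_IPSEC.
 */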
static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
#else
	return false;
#endif
}

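/* Bytes of inline metadata the IPsec offload appends to the WQE,
 * presumably used by the caller when sizing the WQE; 0 when IPsec
 * offload is not active on this SQ.
 */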
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}

/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)

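/* Fill the offload-owned prefix of the eth segment (MLX5E_ACCEL_ESEG_LEN
 * bytes, see above): IPsec metadata, and SWP offsets for encapsulated
 * packets that request checksum offload.
 */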
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif

	return true;
}

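/* Final fixups after the WQE is built: let TLS stamp its context into
 * the control segment and, for IPsec flows with a trailer, write the
 * trailer into the inline segment.
 */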
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}

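/* RX acceleration setup/teardown currently amounts to kTLS RX. */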
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}

static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */