// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

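/* GRO receive handler for ESP-in-IPv4. Parses the SPI and sequence
 * number, attaches a secpath with the matching xfrm state if the
 * packet was not already decrypted in hardware (CRYPTO_DONE), and
 * hands the packet to xfrm_input() for asynchronous processing.
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer the skb has
 * been consumed; on failure the skb is restored and flagged so GRO
 * flushes it up the stack unmodified.
 */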
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

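/* GSO encapsulation callback: builds the ESP header in the headroom
 * reserved by the xfrm output path, writes the SPI and the low 32
 * bits of the output sequence number, saves the inner protocol in
 * the offload context and marks the outer protocol byte (pointed to
 * by the mac header) as ESP.
 */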
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

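/* Tunnel mode: the whole inner packet is still intact (encryption
 * happens after segmentation), so re-expose the mac header and let
 * the generic code segment it like any other packet.
 */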
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

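/* Transport mode: step over the room reserved for the ESP header
 * (x->props.header_len) and delegate segmentation to the offload
 * handler of the inner transport protocol saved in xo->proto.
 */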
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

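/* Dispatch segmentation on the outer mode of the state; modes other
 * than tunnel and transport are not offloadable.
 */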
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

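/* GSO segmentation entry point registered for IPPROTO_ESP. Validates
 * the offload state and the SPI, strips the ESP header and IV so the
 * inner packet can be segmented, adjusts the feature set for the
 * crypto capabilities of the device, and segments according to the
 * outer mode.
 */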
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

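	/* No hardware ESP support, or the state is bound to another
	 * device: clear SG and checksum offloads so the software
	 * fallback gets fully resolved segments. With ESP support but
	 * no ESP checksum offload, only checksumming stays disabled.
	 */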
	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

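/* Completes ESP receive processing once decryption is done: makes
 * sure the ESP header and IV are in the linear area, invalidates the
 * checksum if the crypto was not done by the device, and lets
 * esp_input_done2() strip the padding and trailer.
 */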
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

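/* Transmit callback for offloaded ESP states. Decides between real
 * hardware offload and the software fallback (CRYPTO_FALLBACK),
 * sizes and builds the ESP trailer, fills in SPI and sequence
 * number, and fixes up the outer IP header. For true hardware
 * offload the driver performs the crypto; otherwise
 * esp_output_tail() encrypts here.
 */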
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

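	/* The trailer is TFC pad, ESP padding up to the cipher block
	 * size, the pad-length and next-header bytes, and the ICV.
	 * Worked example, assuming AES-GCM (AEAD block size 1, so
	 * blksize = 4, with a 16 byte ICV) and a 1460 byte payload:
	 * clen = ALIGN(1460 + 2 + 0, 4) = 1464, plen = 4 (two padding
	 * bytes plus pad-length and next-header), tailen = 4 + 16 = 20.
	 */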
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

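/* Registration glue: esp4_offload hooks ESP into the inet GRO/GSO
 * machinery, esp_type_offload hooks this module into the xfrm
 * offload infrastructure.
 */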
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

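/* Register the xfrm type offload first, then the inet protocol
 * offload; module exit unregisters both.
 */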
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);