// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
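/* Adjust skb header pointers for transport mode offload: pull the packet
 * so that the data handed to the device starts after the IPsec header.
 */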
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

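/* Tunnel mode variant: the inner packet starts after the outer IP header
 * of @hsize bytes plus the IPsec header.
 */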
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

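/* BEET mode variant: x->props.header_len reserves room for a pseudo
 * header (up to IPV4_BEET_PHMAXLEN bytes) for non-IPv6 inner packets;
 * compensate for that reservation when pulling the outer headers.
 */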
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

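/* Check whether adding the GSO segment count to the low 32 bits of the
 * ESP sequence number would wrap; such packets must be segmented in
 * software so that every segment gets a valid sequence number.
 */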
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}

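/* Validate an skb for ESP hardware offload on the transmit path.
 * GSO packets that were rerouted to another device or would overflow
 * the ESP sequence number are segmented in software first; the
 * resulting skbs are then run through the ESP type offload xmit
 * callback before they are handed to the device.
 */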
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			atomic_long_inc(&dev->tx_dropped);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

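	/* The skb was not segmented above: handle it as a single packet. */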
	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

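	/* Walk the segment list and offload each segment on its own;
	 * segments taken by the driver (-EINPROGRESS) are unlinked from
	 * the list, while a hard error frees all remaining segments.
	 */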
	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

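/* Bind an xfrm state to a net_device for hardware offload. If @xuo does
 * not name a device by ifindex, a route lookup on the state's addresses
 * selects one. Returns 0 when the state is (or falls back to being)
 * handled in software.
 */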
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->real_dev = dev;
	xso->num_exthdrs = 1;
	/* Don't forward bit that is not implemented */
	xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		xso->real_dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

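/* Check whether a packet may take the hardware offload path: the state
 * must support offloading and the packet must fit the cached path MTU,
 * or be a GSO packet whose segments do.
 */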
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

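/* Resume transmission of an skb that the driver hands back after
 * asynchronous processing. If the device queue is frozen or stopped,
 * park the skb on the per-cpu xfrm backlog and let the TX softirq
 * retry it.
 */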
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

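/* Called from the NET_TX softirq to drain skbs that xfrm_dev_resume()
 * backlogged because the device queue was busy.
 */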
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

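/* Reject devices that advertise ESP offload features without providing
 * the xfrmdev_ops callbacks needed to use them.
 */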
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

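/* Netdevice notifier: validate offload features on register and feature
 * change, and flush offloaded states when a device goes down or away.
 */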
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}