// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/gro.h>

#include "ip6_offload.h"

/* All GRO functions are always built in, except UDP over IPv6, which lives
 * in the ipv6 module, as it depends on the UDPv6 lookup function. We
 * therefore need special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})

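/* Advance skb->data past any IPv6 extension headers that support GSO
 * (INET6_PROTO_GSO_EXTHDR), pulling each one, and return the protocol
 * number of the first header that is not such an extension header --
 * normally the L4 protocol.
 */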
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

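/* Segment an IPv6 GSO skb: pull the IPv6 header plus extension headers,
 * hand the rest to the L4 gso_segment() callback, then fix up
 * payload_len (and, for UDP fragmentation, the fragment header) in every
 * resulting segment.
 */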
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

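	/* Walk the segment list and rewrite each IPv6 header: the payload
	 * length changed during segmentation, and UDP fragments also need
	 * their fragment offset and MF bit filled in.
	 */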
	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic as ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

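/* GRO receive for ETH_P_IPV6: validate the IPv6 header, compare it
 * against the packets already held on @head to decide same-flow, then
 * hand off to the L4 gro_receive callback (TCP or UDP in the common
 * case).
 */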
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							  struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
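	/* No GRO handler at the immediate next header: walk the extension
	 * header chain with ipv6_gso_pull_exthdrs() and retry with the
	 * final protocol. The pull/push below rewrite skb->data, so the
	 * frag0 fast path must be invalidated first.
	 */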
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

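/* Tunnel GRO helpers: encap_mark guards against aggregating through
 * more than one level of encapsulation. Once a tunnel header has been
 * consumed, any further tunneled packet is flushed instead of merged.
 */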
static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4-in-IPv6: the inner packet is IPv4 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

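/* Finish a merged IPv6 packet: restore payload_len to cover the
 * coalesced data, skip past any extension headers and invoke the L4
 * gro_complete callback at the resulting transport offset.
 */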
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

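	/* The header still carries the first segment's length; recompute
	 * payload_len so it covers the full coalesced packet.
	 */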
	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

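/* Tunnel gro_complete wrappers: mark the skb as encapsulated and record
 * the matching SKB_GSO_IPXIP* type before completing the inner headers.
 */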
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

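/* Offload callbacks for the ETH_P_IPV6 ethertype, registered with
 * dev_add_offload() in ipv6_offload_init() below.
 */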
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

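/* Tunnel gso_segment wrappers: reject skbs whose gso_type does not
 * match the tunnel they were registered for, then segment the inner
 * packet.
 */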
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = sit_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment = ip4ip6_gso_segment,
		.gro_receive = ip4ip6_gro_receive,
		.gro_complete = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment = ip6ip6_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = ip6ip6_gro_complete,
	},
};
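
/* Register everything: TCP and extension-header offloads first, then
 * the ETH_P_IPV6 packet offload and the tunnel (SIT, ip6ip6, ip4ip6)
 * protocol offloads.
 */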
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);