// SPDX-License-Identifier: GPL-2.0

/* In-place tunneling */
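
/* tc (clsact) programs: each encap_* section wraps matching egress TCP
 * packets in a tunnel header in place via bpf_skb_adjust_room(), and
 * "decap" reverses it. Normally driven by the test_tc_tunnel.sh harness;
 * a minimal manual attach might look like this (assuming the object is
 * built as test_tc_tunnel.bpf.o):
 *
 *	tc qdisc add dev $DEV clsact
 *	tc filter add dev $DEV egress bpf direct-action \
 *		obj test_tc_tunnel.bpf.o sec encap_ipip_none
 */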

#include <stdbool.h>
#include <string.h>

#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/pkt_cls.h>
#include <linux/types.h>

#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

static const int cfg_port = 8000;

static const int cfg_udp_src = 20000;

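/* Scratch space kept behind the outer L3/L4 headers, sized for the
 * largest L2 encap used here: a VXLAN header plus inner Ethernet header.
 */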
#define L2_PAD_SZ	(sizeof(struct vxlanhdr) + ETH_HLEN)

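/* Tunnel UDP destination ports: 6635 is the IANA MPLS-over-UDP port
 * (RFC 7510) and 8472 the legacy Linux VXLAN port; 5555 and 7777 are
 * arbitrary values private to this test.
 */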
#define UDP_PORT		5555
#define MPLS_OVER_UDP_PORT	6635
#define ETH_OVER_UDP_PORT	7777
#define VXLAN_UDP_PORT		8472

#define EXTPROTO_VXLAN		0x1

#define VXLAN_N_VID		(1u << 24)
#define VXLAN_VNI_MASK		bpf_htonl((VXLAN_N_VID - 1) << 8)
#define VXLAN_FLAGS		0x8
#define VXLAN_VNI		1

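/* MPLS label stack entry layout: label:20 | TC:3 | S:1 | TTL:8. */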
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
						     MPLS_LS_S_MASK | 0xff);

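/* Stripped-down VXLAN header (RFC 7348): a 32-bit flags word (I flag =
 * 0x08 in the leading byte) followed by a 32-bit word carrying the
 * 24-bit VNI in its upper bits.
 */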
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
} __attribute__((packed));

struct gre_hdr {
	__be16 flags;
	__be16 protocol;
} __attribute__((packed));

union l4hdr {
	struct udphdr udp;
	struct gre_hdr gre;
};

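/* Outer headers are staged in these scratch structs, then written into
 * the packet with a single bpf_skb_store_bytes(); only the first olen
 * bytes (L3 + optional L4 + optional L2) are copied.
 */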
struct v4hdr {
	struct iphdr ip;
	union l4hdr l4hdr;
	__u8 pad[L2_PAD_SZ];		/* space for L2 header / vxlan header ... */
} __attribute__((packed));

struct v6hdr {
	struct ipv6hdr ip;
	union l4hdr l4hdr;
	__u8 pad[L2_PAD_SZ];		/* space for L2 header / vxlan header ... */
} __attribute__((packed));

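/* Compute the IPv4 header checksum: one's-complement sum of the header
 * as 16-bit words (fully unrolled so the verifier sees a bounded loop),
 * then fold the carry and invert.
 */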
static __always_inline void set_ipv4_csum(struct iphdr *iph)
{
	__u16 *iph16 = (__u16 *)iph;
	__u32 csum;
	int i;

	iph->check = 0;

#pragma clang loop unroll(full)
	for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
		csum += *iph16++;

	iph->check = ~((csum & 0xffff) + (csum >> 16));
}

static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
					__u16 l2_proto, __u16 ext_proto)
{
	__u16 udp_dst = UDP_PORT;
	struct iphdr iph_inner;
	struct v4hdr h_outer;
	struct tcphdr tcph;
	int olen, l2_len;
	__u8 *l2_hdr = NULL;
	int tcp_off;
	__u64 flags;

	/* Most tests encapsulate a packet into a tunnel with the same
	 * network protocol, and derive the outer header fields from
	 * the inner header.
	 *
	 * The 6in4 case tests different inner and outer protocols. As
	 * the inner is ipv6, but the outer expects an ipv4 header as
	 * input, manually build a struct iphdr based on the ipv6hdr.
	 */
	if (encap_proto == IPPROTO_IPV6) {
		const __u32 saddr = (192 << 24) | (168 << 16) | (1 << 8) | 1;
		const __u32 daddr = (192 << 24) | (168 << 16) | (1 << 8) | 2;
		struct ipv6hdr iph6_inner;

		/* Read the IPv6 header */
		if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph6_inner,
				       sizeof(iph6_inner)) < 0)
			return TC_ACT_OK;

		/* Derive the IPv4 header fields from the IPv6 header */
		memset(&iph_inner, 0, sizeof(iph_inner));
		iph_inner.version = 4;
		iph_inner.ihl = 5;
		iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) +
				    bpf_ntohs(iph6_inner.payload_len));
		iph_inner.ttl = iph6_inner.hop_limit - 1;
		iph_inner.protocol = iph6_inner.nexthdr;
		iph_inner.saddr = __bpf_constant_htonl(saddr);
		iph_inner.daddr = __bpf_constant_htonl(daddr);

		tcp_off = sizeof(iph6_inner);
	} else {
		if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
				       sizeof(iph_inner)) < 0)
			return TC_ACT_OK;

		tcp_off = sizeof(iph_inner);
	}

	/* filter only packets we want */
	if (iph_inner.ihl != 5 || iph_inner.protocol != IPPROTO_TCP)
		return TC_ACT_OK;

	if (bpf_skb_load_bytes(skb, ETH_HLEN + tcp_off,
			       &tcph, sizeof(tcph)) < 0)
		return TC_ACT_OK;

	if (tcph.dest != __bpf_constant_htons(cfg_port))
		return TC_ACT_OK;

	olen = sizeof(h_outer.ip);
	l2_len = 0;

	flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;

	switch (l2_proto) {
	case ETH_P_MPLS_UC:
		l2_len = sizeof(mpls_label);
		udp_dst = MPLS_OVER_UDP_PORT;
		break;
	case ETH_P_TEB:
		l2_len = ETH_HLEN;
		if (ext_proto & EXTPROTO_VXLAN) {
			udp_dst = VXLAN_UDP_PORT;
			l2_len += sizeof(struct vxlanhdr);
		} else
			udp_dst = ETH_OVER_UDP_PORT;
		break;
	}
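	/* BPF_F_ADJ_ROOM_ENCAP_L2(len) tells the kernel how many of the new
	 * bytes are an inner L2 header (MPLS label, Ethernet, or
	 * VXLAN + Ethernet), so inner header offsets stay correct for GSO.
	 */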
	flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);

	switch (encap_proto) {
	case IPPROTO_GRE:
		flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
		olen += sizeof(h_outer.l4hdr.gre);
		h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
		h_outer.l4hdr.gre.flags = 0;
		break;
	case IPPROTO_UDP:
		flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
		olen += sizeof(h_outer.l4hdr.udp);
		h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
		h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
		h_outer.l4hdr.udp.check = 0;
		h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) +
						  sizeof(h_outer.l4hdr.udp) +
						  l2_len);
		break;
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		break;
	default:
		return TC_ACT_OK;
	}

	/* add L2 encap (if specified) */
	l2_hdr = (__u8 *)&h_outer + olen;
	switch (l2_proto) {
	case ETH_P_MPLS_UC:
		*(__u32 *)l2_hdr = mpls_label;
		break;
	case ETH_P_TEB:
		flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;

		if (ext_proto & EXTPROTO_VXLAN) {
			struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;

			vxlan_hdr->vx_flags = VXLAN_FLAGS;
			vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);

			l2_hdr += sizeof(struct vxlanhdr);
		}

		if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
			return TC_ACT_SHOT;

		break;
	}
	olen += l2_len;

	/* add room between mac and network header */
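	/* BPF_F_ADJ_ROOM_FIXED_GSO keeps gso_size unchanged; the room opened
	 * here is uninitialized until the bpf_skb_store_bytes() below.
	 */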
	if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;

	/* prepare new outer network header */
	h_outer.ip = iph_inner;
	h_outer.ip.tot_len = bpf_htons(olen +
				       bpf_ntohs(h_outer.ip.tot_len));
	h_outer.ip.protocol = encap_proto;

	set_ipv4_csum((void *)&h_outer.ip);

	/* store new outer network header */
	if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
				BPF_F_INVALIDATE_HASH) < 0)
		return TC_ACT_SHOT;

	/* if changing outer proto type, update eth->h_proto */
	if (encap_proto == IPPROTO_IPV6) {
		struct ethhdr eth;

		if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
			return TC_ACT_SHOT;
		eth.h_proto = bpf_htons(ETH_P_IP);
		if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
			return TC_ACT_SHOT;
	}

	return TC_ACT_OK;
}

static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
				      __u16 l2_proto)
{
	return __encap_ipv4(skb, encap_proto, l2_proto, 0);
}

static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
					__u16 l2_proto, __u16 ext_proto)
{
	__u16 udp_dst = UDP_PORT;
	struct ipv6hdr iph_inner;
	struct v6hdr h_outer;
	struct tcphdr tcph;
	int olen, l2_len;
	__u8 *l2_hdr = NULL;
	__u16 tot_len;
	__u64 flags;

	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
			       sizeof(iph_inner)) < 0)
		return TC_ACT_OK;

	/* filter only packets we want */
	if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(iph_inner),
			       &tcph, sizeof(tcph)) < 0)
		return TC_ACT_OK;

	if (tcph.dest != __bpf_constant_htons(cfg_port))
		return TC_ACT_OK;

	olen = sizeof(h_outer.ip);
	l2_len = 0;

	flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;

	switch (l2_proto) {
	case ETH_P_MPLS_UC:
		l2_len = sizeof(mpls_label);
		udp_dst = MPLS_OVER_UDP_PORT;
		break;
	case ETH_P_TEB:
		l2_len = ETH_HLEN;
		if (ext_proto & EXTPROTO_VXLAN) {
			udp_dst = VXLAN_UDP_PORT;
			l2_len += sizeof(struct vxlanhdr);
		} else
			udp_dst = ETH_OVER_UDP_PORT;
		break;
	}
	flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);

	switch (encap_proto) {
	case IPPROTO_GRE:
		flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
		olen += sizeof(h_outer.l4hdr.gre);
		h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
		h_outer.l4hdr.gre.flags = 0;
		break;
	case IPPROTO_UDP:
		flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
		olen += sizeof(h_outer.l4hdr.udp);
		h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
		h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
		tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
			  sizeof(h_outer.l4hdr.udp) + l2_len;
		h_outer.l4hdr.udp.check = 0;
		h_outer.l4hdr.udp.len = bpf_htons(tot_len);
		break;
	case IPPROTO_IPV6:
		break;
	default:
		return TC_ACT_OK;
	}

	/* add L2 encap (if specified) */
	l2_hdr = (__u8 *)&h_outer + olen;
	switch (l2_proto) {
	case ETH_P_MPLS_UC:
		*(__u32 *)l2_hdr = mpls_label;
		break;
	case ETH_P_TEB:
		flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;

		if (ext_proto & EXTPROTO_VXLAN) {
			struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;

			vxlan_hdr->vx_flags = VXLAN_FLAGS;
			vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);

			l2_hdr += sizeof(struct vxlanhdr);
		}

		if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
			return TC_ACT_SHOT;
		break;
	}
	olen += l2_len;

	/* add room between mac and network header */
	if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;

	/* prepare new outer network header */
	h_outer.ip = iph_inner;
	h_outer.ip.payload_len = bpf_htons(olen +
					   bpf_ntohs(h_outer.ip.payload_len));

	h_outer.ip.nexthdr = encap_proto;

	/* store new outer network header */
	if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
				BPF_F_INVALIDATE_HASH) < 0)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
				      __u16 l2_proto)
{
	return __encap_ipv6(skb, encap_proto, l2_proto, 0);
}

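/* One flavor program per tunnel type; the test harness picks them by
 * section name: encap_<tunnel>_<inner l2>, with inner L2 none, mpls or
 * eth (the vxlan variants put a VXLAN header before the inner Ethernet).
 */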
SEC("encap_ipip_none")
int __encap_ipip_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_IPIP, ETH_P_IP);
	else
		return TC_ACT_OK;
}

SEC("encap_gre_none")
int __encap_gre_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_GRE, ETH_P_IP);
	else
		return TC_ACT_OK;
}

SEC("encap_gre_mpls")
int __encap_gre_mpls(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
	else
		return TC_ACT_OK;
}

SEC("encap_gre_eth")
int __encap_gre_eth(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_GRE, ETH_P_TEB);
	else
		return TC_ACT_OK;
}

SEC("encap_udp_none")
int __encap_udp_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_UDP, ETH_P_IP);
	else
		return TC_ACT_OK;
}

SEC("encap_udp_mpls")
int __encap_udp_mpls(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
	else
		return TC_ACT_OK;
}

SEC("encap_udp_eth")
int __encap_udp_eth(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return encap_ipv4(skb, IPPROTO_UDP, ETH_P_TEB);
	else
		return TC_ACT_OK;
}

SEC("encap_vxlan_eth")
int __encap_vxlan_eth(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
		return __encap_ipv4(skb, IPPROTO_UDP,
				    ETH_P_TEB,
				    EXTPROTO_VXLAN);
	else
		return TC_ACT_OK;
}

SEC("encap_sit_none")
int __encap_sit_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv4(skb, IPPROTO_IPV6, ETH_P_IP);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6tnl_none")
int __encap_ip6tnl_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_IPV6, ETH_P_IPV6);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6gre_none")
int __encap_ip6gre_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_GRE, ETH_P_IPV6);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6gre_mpls")
int __encap_ip6gre_mpls(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6gre_eth")
int __encap_ip6gre_eth(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_GRE, ETH_P_TEB);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6udp_none")
int __encap_ip6udp_none(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_UDP, ETH_P_IPV6);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6udp_mpls")
int __encap_ip6udp_mpls(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6udp_eth")
int __encap_ip6udp_eth(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return encap_ipv6(skb, IPPROTO_UDP, ETH_P_TEB);
	else
		return TC_ACT_OK;
}

SEC("encap_ip6vxlan_eth")
int __encap_ip6vxlan_eth(struct __sk_buff *skb)
{
	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
		return __encap_ipv6(skb, IPPROTO_UDP,
				    ETH_P_TEB,
				    EXTPROTO_VXLAN);
	else
		return TC_ACT_OK;
}

static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
	struct gre_hdr greh;
	struct udphdr udph;
	int olen = len;

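	/* Work out how many bytes of tunnel headers follow the outer network
	 * header, based on the encap protocol and, for GRE/UDP, the tunneled
	 * payload type.
	 */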
	switch (proto) {
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		break;
	case IPPROTO_GRE:
		olen += sizeof(struct gre_hdr);
		if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0)
			return TC_ACT_OK;
		switch (bpf_ntohs(greh.protocol)) {
		case ETH_P_MPLS_UC:
			olen += sizeof(mpls_label);
			break;
		case ETH_P_TEB:
			olen += ETH_HLEN;
			break;
		}
		break;
	case IPPROTO_UDP:
		olen += sizeof(struct udphdr);
		if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0)
			return TC_ACT_OK;
		switch (bpf_ntohs(udph.dest)) {
		case MPLS_OVER_UDP_PORT:
			olen += sizeof(mpls_label);
			break;
		case ETH_OVER_UDP_PORT:
			olen += ETH_HLEN;
			break;
		case VXLAN_UDP_PORT:
			olen += ETH_HLEN + sizeof(struct vxlanhdr);
			break;
		}
		break;
	default:
		return TC_ACT_OK;
	}

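	/* Shrinking at the MAC layer strips the outer network and tunnel
	 * headers in place, leaving the inner packet right behind the
	 * Ethernet header.
	 */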
	if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_FIXED_GSO))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

static int decap_ipv4(struct __sk_buff *skb)
{
	struct iphdr iph_outer;

	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
			       sizeof(iph_outer)) < 0)
		return TC_ACT_OK;

	if (iph_outer.ihl != 5)
		return TC_ACT_OK;

	return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
			      iph_outer.protocol);
}

static int decap_ipv6(struct __sk_buff *skb)
{
	struct ipv6hdr iph_outer;

	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
			       sizeof(iph_outer)) < 0)
		return TC_ACT_OK;

	return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
			      iph_outer.nexthdr);
}

SEC("decap")
int decap_f(struct __sk_buff *skb)
{
	switch (skb->protocol) {
	case __bpf_constant_htons(ETH_P_IP):
		return decap_ipv4(skb);
	case __bpf_constant_htons(ETH_P_IPV6):
		return decap_ipv6(skb);
	default:
		/* does not match, ignore */
		return TC_ACT_OK;
	}
}

char __license[] SEC("license") = "GPL";