#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

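/* All configured FOU ports live on a single global list, protected by
 * fou_lock.  The receive path never walks this list; it finds its
 * struct fou through the UDP socket's sk_user_data instead.
 */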
static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

struct fou {
	struct socket *sock;
	u8 protocol;
	u16 port;
	struct udp_offload udp_offloads;
	struct list_head list;
	struct rcu_head rcu;
};

struct fou_cfg {
	u16 type;
	u8 protocol;
	struct udp_port_cfg udp_config;
};

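/* The tunnel socket carries a pointer to its struct fou in sk_user_data;
 * it is set in fou_create() and cleared in fou_release().
 */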
static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

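/* Strip the outer UDP header (plus any encapsulation header) and hand the
 * packet back to the IP stack.  An encap_rcv handler that returns a
 * negative value asks UDP to resubmit the skb as IP protocol -N, which is
 * how the inner protocol gets delivered.
 */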
static int fou_udp_encap_recv_deliver(struct sk_buff *skb,
				      u8 protocol, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present), fix up the IP total length, and
	 * return the negated protocol so UDP resubmits the packet.
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);

	return -protocol;
}

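/* encap_rcv handler for plain FOU: only the UDP header is stripped; the
 * inner protocol was fixed at configuration time.
 */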
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	return fou_udp_encap_recv_deliver(skb, fou->protocol,
					  sizeof(struct udphdr));
}

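/* encap_rcv handler for GUE: parse the GUE header that follows the UDP
 * header, validate it, and strip both before resubmitting.  Returning 1
 * (no fou state) lets the packet continue up the normal UDP path.
 */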
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len;
	struct guehdr *guehdr;
	struct udphdr *uh;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	uh = udp_hdr(skb);
	guehdr = (struct guehdr *)&uh[1];

	len += guehdr->hlen << 2;
	if (!pskb_may_pull(skb, len))
		goto drop;

	/* pskb_may_pull() may have reallocated the header, so reload
	 * the header pointers before using them again.
	 */
	uh = udp_hdr(skb);
	guehdr = (struct guehdr *)&uh[1];

	if (guehdr->version != 0)
		goto drop;

	if (guehdr->flags) {
		/* No support yet */
		goto drop;
	}

	return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len);
drop:
	kfree_skb(skb);
	return 0;
}

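/* GRO receive for plain FOU.  The inner protocol was recorded in
 * NAPI_GRO_CB(skb)->proto by the UDP offload layer, so just look up that
 * protocol's offload ops and chain to its gro_receive.
 */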
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

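/* GRO complete for plain FOU: finish the UDP tunnel bookkeeping, then let
 * the inner protocol's gro_complete finalize the merged packet.
 */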
static int fou_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

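/* GRO receive for GUE: pull and validate the GUE header, mark any flow on
 * the GRO list whose GUE header differs as not the same flow, then chain
 * to the inner protocol's gro_receive.
 */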
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	u8 proto;
	struct guehdr *guehdr;
	unsigned int hlen, guehlen;
	unsigned int off;
	int flush = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*guehdr);
	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		guehdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!guehdr))
			goto out;
	}

	proto = guehdr->next_hdr;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	hlen = off + guehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		guehdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!guehdr))
			goto out_unlock;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, next_hdr, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Compare optional fields for equality. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, guehlen);

	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, guehdr, guehlen);

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

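/* GRO complete for GUE: skip past the GUE header and finalize with the
 * inner protocol's gro_complete.
 */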
static int gue_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->next_hdr;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

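/* Insert a new fou entry on the global list, rejecting duplicate ports. */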
static int fou_add_to_port_list(struct fou *fou)
{
	struct fou *fout;

	spin_lock(&fou_lock);
	list_for_each_entry(fout, &fou_list, list) {
		if (fou->port == fout->port) {
			spin_unlock(&fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fou_list);
	spin_unlock(&fou_lock);

	return 0;
}

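/* Tear down a fou entry.  Called with fou_lock held, since it unlinks the
 * entry from fou_list.  The structure is freed via kfree_rcu() so a
 * receive path that has already fetched sk_user_data can finish safely.
 */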
static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	udp_del_offload(&fou->udp_offloads);

	list_del(&fou->list);

	/* Remove hooks into tunnel socket */
	sk->sk_user_data = NULL;

	sock_release(sock);

	kfree_rcu(fou, rcu);
}

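/* Per-type setup: install the matching encap_rcv handler and GRO
 * callbacks on the tunnel socket.  Plain FOU also records the fixed inner
 * protocol; GUE learns it per packet from the GUE header.
 */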
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}

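/* Create a fou port: open the listening UDP socket, set up encapsulation
 * for the requested type, enable UDP encapsulation and GRO offload, and
 * publish the entry on the global port list.
 */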
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct fou *fou = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	udp_set_convert_csum(sk, true);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		sock_release(sock);

	return err;
}

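/* Find the fou bound to the given local port and release it.  Returns
 * -EINVAL if no such port is configured.
 */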
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou *fou;
	u16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;

	spin_lock(&fou_lock);
	list_for_each_entry(fou, &fou_list, list) {
		if (fou->port == port) {
			udp_del_offload(&fou->udp_offloads);
			fou_release(fou);
			err = 0;
			break;
		}
	}
	spin_unlock(&fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]		= { .type = NLA_U16, },
	[FOU_ATTR_AF]		= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]	= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]		= { .type = NLA_U8, },
};

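/* Translate netlink attributes into a fou_cfg.  The address family
 * defaults to AF_INET when FOU_ATTR_AF is absent.
 */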
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET && family != AF_INET6)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	/* Check the parse result here too; a malformed request must not
	 * reach fou_destroy() with a half-filled config.
	 */
	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(&init_net, &cfg);
}

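/* Generic netlink commands; both require CAP_NET_ADMIN.  With an iproute2
 * build that has FOU support, these map to commands along the lines of
 * "ip fou add port 5555 ipproto 47" and "ip fou del port 5555".
 */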
static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static int __init fou_init(void)
{
	int ret;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);

	return ret;
}

static void __exit fou_fini(void)
{
	struct fou *fou, *next;

	genl_unregister_family(&fou_nl_family);

	/* Close all the FOU sockets */

	spin_lock(&fou_lock);
	list_for_each_entry_safe(fou, next, &fou_list, list)
		fou_release(fou);
	spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");