1 // SPDX-License-Identifier: GPL-2.0-only
2 /* L2TP core.
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This file contains some code of the original L2TPv2 pppol2tp
7 * driver, which has the following copyright:
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24
25 #include <linux/kernel.h>
26 #include <linux/spinlock.h>
27 #include <linux/kthread.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/errno.h>
31 #include <linux/jiffies.h>
32
33 #include <linux/netdevice.h>
34 #include <linux/net.h>
35 #include <linux/inetdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/udp.h>
41 #include <linux/l2tp.h>
42 #include <linux/hash.h>
43 #include <linux/sort.h>
44 #include <linux/file.h>
45 #include <linux/nsproxy.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/dst.h>
49 #include <net/ip.h>
50 #include <net/udp.h>
51 #include <net/udp_tunnel.h>
52 #include <net/inet_common.h>
53 #include <net/xfrm.h>
54 #include <net/protocol.h>
55 #include <net/inet6_connection_sock.h>
56 #include <net/inet_ecn.h>
57 #include <net/ip6_route.h>
58 #include <net/ip6_checksum.h>
59
60 #include <asm/byteorder.h>
61 #include <linux/atomic.h>
62
63 #include "l2tp_core.h"
64 #include "trace.h"
65
66 #define CREATE_TRACE_POINTS
67 #include "trace.h"
68
69 #define L2TP_DRV_VERSION "V2.0"
70
71 /* L2TP header constants */
72 #define L2TP_HDRFLAG_T 0x8000
73 #define L2TP_HDRFLAG_L 0x4000
74 #define L2TP_HDRFLAG_S 0x0800
75 #define L2TP_HDRFLAG_O 0x0200
76 #define L2TP_HDRFLAG_P 0x0100
77
78 #define L2TP_HDR_VER_MASK 0x000F
79 #define L2TP_HDR_VER_2 0x0002
80 #define L2TP_HDR_VER_3 0x0003
81
82 /* L2TPv3 default L2-specific sublayer */
83 #define L2TP_SLFLAG_S 0x40000000
84 #define L2TP_SL_SEQ_MASK 0x00ffffff
85
86 #define L2TP_HDR_SIZE_MAX 14
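
/* Worked example (illustrative only): a v2 data packet carrying the optional
 * length and sequence number fields has
 * hdrflags = L2TP_HDRFLAG_L | L2TP_HDRFLAG_S | L2TP_HDR_VER_2 = 0x4802,
 * while a control packet additionally sets L2TP_HDRFLAG_T. In the L2TPv3
 * default L2-specific sublayer, a word of 0x40000005 has L2TP_SLFLAG_S set
 * and carries sequence number 5 (masked by L2TP_SL_SEQ_MASK).
 */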
87
88 /* Default trace flags */
89 #define L2TP_DEFAULT_DEBUG_FLAGS 0
90
91 #define L2TP_DEPTH_NESTING 2
92 #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
93 #error "L2TP requires its own lockdep subclass"
94 #endif
95
96 /* Private data stored for received packets in the skb.
97 */
98 struct l2tp_skb_cb {
99 u32 ns;
100 u16 has_seq;
101 u16 length;
102 unsigned long expires;
103 };
104
105 #define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
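
/* Minimal sketch of how the receive path tags a packet through L2TP_SKB_CB():
 * the cb[] bytes beyond the inet control block hold per-skb L2TP state.
 * (example_tag_rx_skb is hypothetical, shown only to illustrate the macro.)
 *
 *	static void example_tag_rx_skb(struct sk_buff *skb, u32 ns)
 *	{
 *		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
 *
 *		cb->ns = ns;
 *		cb->has_seq = 1;
 *		cb->length = skb->len;
 *		cb->expires = jiffies + HZ;
 *	}
 */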
106
107 static struct workqueue_struct *l2tp_wq;
108
109 /* per-net private data for this module */
110 static unsigned int l2tp_net_id;
111 struct l2tp_net {
112 /* Lock for write access to l2tp_tunnel_idr */
113 spinlock_t l2tp_tunnel_idr_lock;
114 struct idr l2tp_tunnel_idr;
115 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
116 /* Lock for write access to l2tp_session_hlist */
117 spinlock_t l2tp_session_hlist_lock;
118 };
119
120 #if IS_ENABLED(CONFIG_IPV6)
121 static bool l2tp_sk_is_v6(struct sock *sk)
122 {
123 return sk->sk_family == PF_INET6 &&
124 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
125 }
126 #endif
127
128 static inline struct l2tp_net *l2tp_pernet(const struct net *net)
129 {
130 return net_generic(net, l2tp_net_id);
131 }
132
133 /* Session hash global list for L2TPv3.
134 * The session_id SHOULD be random according to RFC3931, but several
135 * L2TP implementations use incrementing session_ids. So we do a real
136 * hash on the session_id, rather than a simple bitmask.
137 */
138 static inline struct hlist_head *
139 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
140 {
141 return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
142 }
143
144 /* Session hash list.
145 * The session_id SHOULD be random according to RFC2661, but several
146 * L2TP implementations (Cisco and Microsoft) use incrementing
147 * session_ids. So we do a real hash on the session_id, rather than a
148 * simple bitmask.
149 */
150 static inline struct hlist_head *
151 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
152 {
153 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
154 }
155
156 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
157 {
158 trace_free_tunnel(tunnel);
159 sock_put(tunnel->sock);
160 /* the tunnel is freed in the socket destructor */
161 }
162
163 static void l2tp_session_free(struct l2tp_session *session)
164 {
165 trace_free_session(session);
166 if (session->tunnel)
167 l2tp_tunnel_dec_refcount(session->tunnel);
168 kfree(session);
169 }
170
171 struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
172 {
173 struct l2tp_tunnel *tunnel = sk->sk_user_data;
174
175 if (tunnel)
176 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
177 return NULL;
178
179 return tunnel;
180 }
181 EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
182
183 void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
184 {
185 refcount_inc(&tunnel->ref_count);
186 }
187 EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
188
189 void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
190 {
191 if (refcount_dec_and_test(&tunnel->ref_count))
192 l2tp_tunnel_free(tunnel);
193 }
194 EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
195
196 void l2tp_session_inc_refcount(struct l2tp_session *session)
197 {
198 refcount_inc(&session->ref_count);
199 }
200 EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
201
202 void l2tp_session_dec_refcount(struct l2tp_session *session)
203 {
204 if (refcount_dec_and_test(&session->ref_count))
205 l2tp_session_free(session);
206 }
207 EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
208
209 /* Lookup a tunnel. A new reference is held on the returned tunnel. */
210 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
211 {
212 const struct l2tp_net *pn = l2tp_pernet(net);
213 struct l2tp_tunnel *tunnel;
214
215 rcu_read_lock_bh();
216 tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
217 if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
218 rcu_read_unlock_bh();
219 return tunnel;
220 }
221 rcu_read_unlock_bh();
222
223 return NULL;
224 }
225 EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
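
/* A caller of l2tp_tunnel_get() owns a reference on success and must release
 * it with l2tp_tunnel_dec_refcount(). Minimal usage sketch (hypothetical
 * caller):
 *
 *	struct l2tp_tunnel *tunnel = l2tp_tunnel_get(net, tunnel_id);
 *
 *	if (tunnel) {
 *		... use tunnel ...
 *		l2tp_tunnel_dec_refcount(tunnel);
 *	}
 */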
226
227 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
228 {
229 struct l2tp_net *pn = l2tp_pernet(net);
230 unsigned long tunnel_id, tmp;
231 struct l2tp_tunnel *tunnel;
232 int count = 0;
233
234 rcu_read_lock_bh();
235 idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
236 if (tunnel && ++count > nth &&
237 refcount_inc_not_zero(&tunnel->ref_count)) {
238 rcu_read_unlock_bh();
239 return tunnel;
240 }
241 }
242 rcu_read_unlock_bh();
243
244 return NULL;
245 }
246 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
247
248 struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
249 u32 session_id)
250 {
251 struct hlist_head *session_list;
252 struct l2tp_session *session;
253
254 session_list = l2tp_session_id_hash(tunnel, session_id);
255
256 rcu_read_lock_bh();
257 hlist_for_each_entry_rcu(session, session_list, hlist)
258 if (session->session_id == session_id) {
259 l2tp_session_inc_refcount(session);
260 rcu_read_unlock_bh();
261
262 return session;
263 }
264 rcu_read_unlock_bh();
265
266 return NULL;
267 }
268 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
269
270 struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
271 {
272 struct hlist_head *session_list;
273 struct l2tp_session *session;
274
275 session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
276
277 rcu_read_lock_bh();
278 hlist_for_each_entry_rcu(session, session_list, global_hlist)
279 if (session->session_id == session_id) {
280 l2tp_session_inc_refcount(session);
281 rcu_read_unlock_bh();
282
283 return session;
284 }
285 rcu_read_unlock_bh();
286
287 return NULL;
288 }
289 EXPORT_SYMBOL_GPL(l2tp_session_get);
290
291 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
292 {
293 int hash;
294 struct l2tp_session *session;
295 int count = 0;
296
297 rcu_read_lock_bh();
298 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
299 hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
300 if (++count > nth) {
301 l2tp_session_inc_refcount(session);
302 rcu_read_unlock_bh();
303 return session;
304 }
305 }
306 }
307
308 rcu_read_unlock_bh();
309
310 return NULL;
311 }
312 EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
313
314 /* Lookup a session by interface name.
315 * This is very inefficient but is only used by management interfaces.
316 */
317 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
318 const char *ifname)
319 {
320 struct l2tp_net *pn = l2tp_pernet(net);
321 int hash;
322 struct l2tp_session *session;
323
324 rcu_read_lock_bh();
325 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
326 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
327 if (!strcmp(session->ifname, ifname)) {
328 l2tp_session_inc_refcount(session);
329 rcu_read_unlock_bh();
330
331 return session;
332 }
333 }
334 }
335
336 rcu_read_unlock_bh();
337
338 return NULL;
339 }
340 EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
341
342 int l2tp_session_register(struct l2tp_session *session,
343 struct l2tp_tunnel *tunnel)
344 {
345 struct l2tp_session *session_walk;
346 struct hlist_head *g_head;
347 struct hlist_head *head;
348 struct l2tp_net *pn;
349 int err;
350
351 head = l2tp_session_id_hash(tunnel, session->session_id);
352
353 spin_lock_bh(&tunnel->hlist_lock);
354 if (!tunnel->acpt_newsess) {
355 err = -ENODEV;
356 goto err_tlock;
357 }
358
359 hlist_for_each_entry(session_walk, head, hlist)
360 if (session_walk->session_id == session->session_id) {
361 err = -EEXIST;
362 goto err_tlock;
363 }
364
365 if (tunnel->version == L2TP_HDR_VER_3) {
366 pn = l2tp_pernet(tunnel->l2tp_net);
367 g_head = l2tp_session_id_hash_2(pn, session->session_id);
368
369 spin_lock_bh(&pn->l2tp_session_hlist_lock);
370
371 /* IP encap expects session IDs to be globally unique, while
372 * UDP encap doesn't.
373 */
374 hlist_for_each_entry(session_walk, g_head, global_hlist)
375 if (session_walk->session_id == session->session_id &&
376 (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
377 tunnel->encap == L2TP_ENCAPTYPE_IP)) {
378 err = -EEXIST;
379 goto err_tlock_pnlock;
380 }
381
382 l2tp_tunnel_inc_refcount(tunnel);
383 hlist_add_head_rcu(&session->global_hlist, g_head);
384
385 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
386 } else {
387 l2tp_tunnel_inc_refcount(tunnel);
388 }
389
390 WRITE_ONCE(session->tunnel, tunnel);
391 hlist_add_head_rcu(&session->hlist, head);
392 spin_unlock_bh(&tunnel->hlist_lock);
393
394 trace_register_session(session);
395
396 return 0;
397
398 err_tlock_pnlock:
399 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
400 err_tlock:
401 spin_unlock_bh(&tunnel->hlist_lock);
402
403 return err;
404 }
405 EXPORT_SYMBOL_GPL(l2tp_session_register);
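
/* Pseudowire drivers typically pair l2tp_session_create() with
 * l2tp_session_register(). A rough sketch of that flow, assuming a
 * hypothetical per-session private struct my_priv:
 *
 *	session = l2tp_session_create(sizeof(struct my_priv), tunnel,
 *				      session_id, peer_session_id, cfg);
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *
 *	err = l2tp_session_register(session, tunnel);
 *	if (err < 0)
 *		... free the still-unregistered session and bail out ...
 */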
406
407 /*****************************************************************************
408 * Receive data handling
409 *****************************************************************************/
410
411 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
412 * number.
413 */
414 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
415 {
416 struct sk_buff *skbp;
417 struct sk_buff *tmp;
418 u32 ns = L2TP_SKB_CB(skb)->ns;
419
420 spin_lock_bh(&session->reorder_q.lock);
421 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
422 if (L2TP_SKB_CB(skbp)->ns > ns) {
423 __skb_queue_before(&session->reorder_q, skbp, skb);
424 atomic_long_inc(&session->stats.rx_oos_packets);
425 goto out;
426 }
427 }
428
429 __skb_queue_tail(&session->reorder_q, skb);
430
431 out:
432 spin_unlock_bh(&session->reorder_q.lock);
433 }
434
435 /* Dequeue a single skb.
436 */
437 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
438 {
439 struct l2tp_tunnel *tunnel = session->tunnel;
440 int length = L2TP_SKB_CB(skb)->length;
441
442 /* We're about to requeue the skb, so return resources
443 * to its current owner (a socket receive buffer).
444 */
445 skb_orphan(skb);
446
447 atomic_long_inc(&tunnel->stats.rx_packets);
448 atomic_long_add(length, &tunnel->stats.rx_bytes);
449 atomic_long_inc(&session->stats.rx_packets);
450 atomic_long_add(length, &session->stats.rx_bytes);
451
452 if (L2TP_SKB_CB(skb)->has_seq) {
453 /* Bump our Nr */
454 session->nr++;
455 session->nr &= session->nr_max;
456 trace_session_seqnum_update(session);
457 }
458
459 /* call private receive handler */
460 if (session->recv_skb)
461 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
462 else
463 kfree_skb(skb);
464 }
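
/* When a session supplies a recv_skb callback it takes ownership of the skb
 * passed to it. A minimal sketch of such a handler (hypothetical pseudowire,
 * shown only to illustrate the contract):
 *
 *	static void example_recv_skb(struct l2tp_session *session,
 *				     struct sk_buff *skb, int data_len)
 *	{
 *		... hand skb on, e.g. to netif_rx(), or free it ...
 *	}
 */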
465
466 /* Dequeue skbs from the session's reorder_q, subject to packet order.
467 * Skbs that have been in the queue for too long are simply discarded.
468 */
469 static void l2tp_recv_dequeue(struct l2tp_session *session)
470 {
471 struct sk_buff *skb;
472 struct sk_buff *tmp;
473
474 /* If the pkt at the head of the queue has the nr that we
475 * expect to send up next, dequeue it and any other
476 * in-sequence packets behind it.
477 */
478 start:
479 spin_lock_bh(&session->reorder_q.lock);
480 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
481 struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
482
483 /* If the packet has been pending on the queue for too long, discard it */
484 if (time_after(jiffies, cb->expires)) {
485 atomic_long_inc(&session->stats.rx_seq_discards);
486 atomic_long_inc(&session->stats.rx_errors);
487 trace_session_pkt_expired(session, cb->ns);
488 session->reorder_skip = 1;
489 __skb_unlink(skb, &session->reorder_q);
490 kfree_skb(skb);
491 continue;
492 }
493
494 if (cb->has_seq) {
495 if (session->reorder_skip) {
496 session->reorder_skip = 0;
497 session->nr = cb->ns;
498 trace_session_seqnum_reset(session);
499 }
500 if (cb->ns != session->nr)
501 goto out;
502 }
503 __skb_unlink(skb, &session->reorder_q);
504
505 /* Process the skb. We release the queue lock while we
506 * do so to let other contexts process the queue.
507 */
508 spin_unlock_bh(&session->reorder_q.lock);
509 l2tp_recv_dequeue_skb(session, skb);
510 goto start;
511 }
512
513 out:
514 spin_unlock_bh(&session->reorder_q.lock);
515 }
516
517 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
518 {
519 u32 nws;
520
521 if (nr >= session->nr)
522 nws = nr - session->nr;
523 else
524 nws = (session->nr_max + 1) - (session->nr - nr);
525
526 return nws < session->nr_window_size;
527 }
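
/* Worked example of the window check above, using the v2 defaults of
 * nr_max = 0xffff and nr_window_size = nr_max / 2 = 0x7fff: with
 * session->nr = 0xfffe an incoming nr of 5 wraps to
 * nws = 0x10000 - (0xfffe - 5) = 7 and is accepted, while with
 * session->nr = 5 an incoming nr of 0x8005 gives nws = 0x8000 and is
 * rejected as outside the receive window.
 */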
528
529 /* If packet has sequence numbers, queue it if acceptable. Returns 0 if
530 * acceptable, else non-zero.
531 */
532 static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
533 {
534 struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
535
536 if (!l2tp_seq_check_rx_window(session, cb->ns)) {
537 /* Packet sequence number is outside allowed window.
538 * Discard it.
539 */
540 trace_session_pkt_outside_rx_window(session, cb->ns);
541 goto discard;
542 }
543
544 if (session->reorder_timeout != 0) {
545 /* Packet reordering enabled. Add skb to session's
546 * reorder queue, in order of ns.
547 */
548 l2tp_recv_queue_skb(session, skb);
549 goto out;
550 }
551
552 /* Packet reordering disabled. Discard out-of-sequence packets, while
553 * tracking the number of in-sequence packets after the first OOS packet
554 * is seen. After nr_oos_count_max in-sequence packets, reset the
555 * sequence number to re-enable packet reception.
556 */
557 if (cb->ns == session->nr) {
558 skb_queue_tail(&session->reorder_q, skb);
559 } else {
560 u32 nr_oos = cb->ns;
561 u32 nr_next = (session->nr_oos + 1) & session->nr_max;
562
563 if (nr_oos == nr_next)
564 session->nr_oos_count++;
565 else
566 session->nr_oos_count = 0;
567
568 session->nr_oos = nr_oos;
569 if (session->nr_oos_count > session->nr_oos_count_max) {
570 session->reorder_skip = 1;
571 }
572 if (!session->reorder_skip) {
573 atomic_long_inc(&session->stats.rx_seq_discards);
574 trace_session_pkt_oos(session, cb->ns);
575 goto discard;
576 }
577 skb_queue_tail(&session->reorder_q, skb);
578 }
579
580 out:
581 return 0;
582
583 discard:
584 return 1;
585 }
586
587 /* Do receive processing of L2TP data frames. We handle both L2TPv2
588 * and L2TPv3 data frames here.
589 *
590 * L2TPv2 Data Message Header
591 *
592 * 0 1 2 3
593 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
594 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
595 * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
596 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
597 * | Tunnel ID | Session ID |
598 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
599 * | Ns (opt) | Nr (opt) |
600 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
601 * | Offset Size (opt) | Offset pad... (opt)
602 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
603 *
604 * Data frames are marked by T=0. All other fields are the same as
605 * those in L2TP control frames.
606 *
607 * L2TPv3 Data Message Header
608 *
609 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
610 * | L2TP Session Header |
611 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
612 * | L2-Specific Sublayer |
613 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
614 * | Tunnel Payload ...
615 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
616 *
617 * L2TPv3 Session Header Over IP
618 *
619 * 0 1 2 3
620 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
621 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
622 * | Session ID |
623 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
624 * | Cookie (optional, maximum 64 bits)...
625 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
626 * |
627 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
628 *
629 * L2TPv3 L2-Specific Sublayer Format
630 *
631 * 0 1 2 3
632 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
633 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
634 * |x|S|x|x|x|x|x|x| Sequence Number |
635 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
636 *
637 * Cookie value and sublayer format are negotiated with the peer when
638 * the session is set up. Unlike L2TPv2, we do not need to parse the
639 * packet header to determine if optional fields are present.
640 *
641 * Caller must already have parsed the frame and determined that it is
642 * a data (not control) frame before coming here. Fields up to the
643 * session-id have already been parsed and ptr points to the data
644 * after the session-id.
645 */
646 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
647 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
648 int length)
649 {
650 struct l2tp_tunnel *tunnel = session->tunnel;
651 int offset;
652
653 /* Parse and check optional cookie */
654 if (session->peer_cookie_len > 0) {
655 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
656 pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
657 tunnel->name, tunnel->tunnel_id,
658 session->session_id);
659 atomic_long_inc(&session->stats.rx_cookie_discards);
660 goto discard;
661 }
662 ptr += session->peer_cookie_len;
663 }
664
665 /* Handle the optional sequence numbers. Sequence numbers are
666 * in different places for L2TPv2 and L2TPv3.
667 *
668 * If we are the LAC, enable/disable sequence numbers under
669 * the control of the LNS. If no sequence numbers present but
670 * we were expecting them, discard frame.
671 */
672 L2TP_SKB_CB(skb)->has_seq = 0;
673 if (tunnel->version == L2TP_HDR_VER_2) {
674 if (hdrflags & L2TP_HDRFLAG_S) {
675 /* Store L2TP info in the skb */
676 L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
677 L2TP_SKB_CB(skb)->has_seq = 1;
678 ptr += 2;
679 /* Skip past nr in the header */
680 ptr += 2;
681
682 }
683 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
684 u32 l2h = ntohl(*(__be32 *)ptr);
685
686 if (l2h & 0x40000000) {
687 /* Store L2TP info in the skb */
688 L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
689 L2TP_SKB_CB(skb)->has_seq = 1;
690 }
691 ptr += 4;
692 }
693
694 if (L2TP_SKB_CB(skb)->has_seq) {
695 /* Received a packet with sequence numbers. If we're the LAC,
696 * check if we are sending sequence numbers and if not,
697 * configure it so.
698 */
699 if (!session->lns_mode && !session->send_seq) {
700 trace_session_seqnum_lns_enable(session);
701 session->send_seq = 1;
702 l2tp_session_set_header_len(session, tunnel->version,
703 tunnel->encap);
704 }
705 } else {
706 /* No sequence numbers.
707 * If user has configured mandatory sequence numbers, discard.
708 */
709 if (session->recv_seq) {
710 pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
711 session->name);
712 atomic_long_inc(&session->stats.rx_seq_discards);
713 goto discard;
714 }
715
716 /* If we're the LAC and we're sending sequence numbers, the
717 * LNS has requested that we no longer send sequence numbers.
718 * If we're the LNS and we're sending sequence numbers, the
719 * LAC is broken. Discard the frame.
720 */
721 if (!session->lns_mode && session->send_seq) {
722 trace_session_seqnum_lns_disable(session);
723 session->send_seq = 0;
724 l2tp_session_set_header_len(session, tunnel->version,
725 tunnel->encap);
726 } else if (session->send_seq) {
727 pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
728 session->name);
729 atomic_long_inc(&session->stats.rx_seq_discards);
730 goto discard;
731 }
732 }
733
734 /* Session data offset is defined only for L2TPv2 and is
735 * indicated by an optional 16-bit value in the header.
736 */
737 if (tunnel->version == L2TP_HDR_VER_2) {
738 /* If offset bit set, skip it. */
739 if (hdrflags & L2TP_HDRFLAG_O) {
740 offset = ntohs(*(__be16 *)ptr);
741 ptr += 2 + offset;
742 }
743 }
744
745 offset = ptr - optr;
746 if (!pskb_may_pull(skb, offset))
747 goto discard;
748
749 __skb_pull(skb, offset);
750
751 /* Prepare skb for adding to the session's reorder_q. Hold
752 * packets for max reorder_timeout or 1 second if not
753 * reordering.
754 */
755 L2TP_SKB_CB(skb)->length = length;
756 L2TP_SKB_CB(skb)->expires = jiffies +
757 (session->reorder_timeout ? session->reorder_timeout : HZ);
758
759 /* Add packet to the session's receive queue. Reordering is done here, if
760 * enabled. Saved L2TP protocol info is stored in skb->cb[].
761 */
762 if (L2TP_SKB_CB(skb)->has_seq) {
763 if (l2tp_recv_data_seq(session, skb))
764 goto discard;
765 } else {
766 /* No sequence numbers. Add the skb to the tail of the
767 * reorder queue. This ensures that it will be
768 * delivered after all previous sequenced skbs.
769 */
770 skb_queue_tail(&session->reorder_q, skb);
771 }
772
773 /* Try to dequeue as many skbs from reorder_q as we can. */
774 l2tp_recv_dequeue(session);
775
776 return;
777
778 discard:
779 atomic_long_inc(&session->stats.rx_errors);
780 kfree_skb(skb);
781 }
782 EXPORT_SYMBOL_GPL(l2tp_recv_common);
783
784 /* Drop skbs from the session's reorder_q
785 */
786 static void l2tp_session_queue_purge(struct l2tp_session *session)
787 {
788 struct sk_buff *skb = NULL;
789
790 while ((skb = skb_dequeue(&session->reorder_q))) {
791 atomic_long_inc(&session->stats.rx_errors);
792 kfree_skb(skb);
793 }
794 }
795
796 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
797 * here. The skb is not on a list when we get here.
798 * Returns 0 if the packet was a data packet and was successfully passed on.
799 * Returns 1 if the packet was not a good data packet and could not be
800 * forwarded. All such packets are passed up to userspace to deal with.
801 */
802 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
803 {
804 struct l2tp_session *session = NULL;
805 unsigned char *ptr, *optr;
806 u16 hdrflags;
807 u32 tunnel_id, session_id;
808 u16 version;
809 int length;
810
811 /* UDP has verified checksum */
812
813 /* UDP always verifies the packet length. */
814 __skb_pull(skb, sizeof(struct udphdr));
815
816 /* Short packet? */
817 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
818 pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
819 tunnel->name, skb->len);
820 goto invalid;
821 }
822
823 /* Point to L2TP header */
824 optr = skb->data;
825 ptr = skb->data;
826
827 /* Get L2TP header flags */
828 hdrflags = ntohs(*(__be16 *)ptr);
829
830 /* Check protocol version */
831 version = hdrflags & L2TP_HDR_VER_MASK;
832 if (version != tunnel->version) {
833 pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
834 tunnel->name, version, tunnel->version);
835 goto invalid;
836 }
837
838 /* Get length of L2TP packet */
839 length = skb->len;
840
841 /* If type is control packet, it is handled by userspace. */
842 if (hdrflags & L2TP_HDRFLAG_T)
843 goto pass;
844
845 /* Skip flags */
846 ptr += 2;
847
848 if (tunnel->version == L2TP_HDR_VER_2) {
849 /* If length is present, skip it */
850 if (hdrflags & L2TP_HDRFLAG_L)
851 ptr += 2;
852
853 /* Extract tunnel and session ID */
854 tunnel_id = ntohs(*(__be16 *)ptr);
855 ptr += 2;
856 session_id = ntohs(*(__be16 *)ptr);
857 ptr += 2;
858 } else {
859 ptr += 2; /* skip reserved bits */
860 tunnel_id = tunnel->tunnel_id;
861 session_id = ntohl(*(__be32 *)ptr);
862 ptr += 4;
863 }
864
865 /* Find the session context */
866 session = l2tp_tunnel_get_session(tunnel, session_id);
867 if (!session || !session->recv_skb) {
868 if (session)
869 l2tp_session_dec_refcount(session);
870
871 /* Not found? Pass to userspace to deal with */
872 pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
873 tunnel->name, tunnel_id, session_id);
874 goto pass;
875 }
876
877 if (tunnel->version == L2TP_HDR_VER_3 &&
878 l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
879 l2tp_session_dec_refcount(session);
880 goto invalid;
881 }
882
883 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
884 l2tp_session_dec_refcount(session);
885
886 return 0;
887
888 invalid:
889 atomic_long_inc(&tunnel->stats.rx_invalid);
890
891 pass:
892 /* Put UDP header back */
893 __skb_push(skb, sizeof(struct udphdr));
894
895 return 1;
896 }
897
898 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
899 * Return codes:
900 * 0 : success.
901 * <0: error
902 * >0: skb should be passed up to userspace as UDP.
903 */
904 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
905 {
906 struct l2tp_tunnel *tunnel;
907
908 /* Note that this is called from the encap_rcv hook inside an
909 * RCU-protected region, but without the socket being locked.
910 * Hence we use rcu_dereference_sk_user_data to access the
911 * tunnel data structure rather than the usual l2tp_sk_to_tunnel
912 * accessor function.
913 */
914 tunnel = rcu_dereference_sk_user_data(sk);
915 if (!tunnel)
916 goto pass_up;
917 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
918 goto pass_up;
919
920 if (l2tp_udp_recv_core(tunnel, skb))
921 goto pass_up;
922
923 return 0;
924
925 pass_up:
926 return 1;
927 }
928 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
929
930 /************************************************************************
931 * Transmit handling
932 ***********************************************************************/
933
934 /* Build an L2TP header for the session into the buffer provided.
935 */
936 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
937 {
938 struct l2tp_tunnel *tunnel = session->tunnel;
939 __be16 *bufp = buf;
940 __be16 *optr = buf;
941 u16 flags = L2TP_HDR_VER_2;
942 u32 tunnel_id = tunnel->peer_tunnel_id;
943 u32 session_id = session->peer_session_id;
944
945 if (session->send_seq)
946 flags |= L2TP_HDRFLAG_S;
947
948 /* Setup L2TP header. */
949 *bufp++ = htons(flags);
950 *bufp++ = htons(tunnel_id);
951 *bufp++ = htons(session_id);
952 if (session->send_seq) {
953 *bufp++ = htons(session->ns);
954 *bufp++ = 0;
955 session->ns++;
956 session->ns &= 0xffff;
957 trace_session_seqnum_update(session);
958 }
959
960 return bufp - optr;
961 }
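
/* Worked example of the v2 header built above: with peer_tunnel_id 5 and
 * peer_session_id 9 and send_seq off, the function writes the 6 bytes
 * 00 02 00 05 00 09 (flags/version, tunnel ID, session ID). With send_seq
 * on and ns = 1 it writes 10 bytes, 08 02 00 05 00 09 00 01 00 00: the S bit
 * raises the flags word to 0x0802 and Ns/Nr follow the session ID.
 */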
962
963 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
964 {
965 struct l2tp_tunnel *tunnel = session->tunnel;
966 char *bufp = buf;
967 char *optr = bufp;
968
969 /* Setup L2TP header. The header differs slightly for UDP and
970 * IP encapsulations. For UDP, there is 4 bytes of flags.
971 */
972 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
973 u16 flags = L2TP_HDR_VER_3;
974 *((__be16 *)bufp) = htons(flags);
975 bufp += 2;
976 *((__be16 *)bufp) = 0;
977 bufp += 2;
978 }
979
980 *((__be32 *)bufp) = htonl(session->peer_session_id);
981 bufp += 4;
982 if (session->cookie_len) {
983 memcpy(bufp, &session->cookie[0], session->cookie_len);
984 bufp += session->cookie_len;
985 }
986 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
987 u32 l2h = 0;
988
989 if (session->send_seq) {
990 l2h = 0x40000000 | session->ns;
991 session->ns++;
992 session->ns &= 0xffffff;
993 trace_session_seqnum_update(session);
994 }
995
996 *((__be32 *)bufp) = htonl(l2h);
997 bufp += 4;
998 }
999
1000 return bufp - optr;
1001 }
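
/* Worked example of the v3 header built above: a UDP-encapsulated session
 * with peer_session_id 7, no cookie and the default L2-specific sublayer
 * (send_seq off) gets 12 bytes, 00 03 00 00 | 00 00 00 07 | 00 00 00 00,
 * i.e. a flags/version word of 0x0003, two reserved bytes, the session ID
 * and an all-zero sublayer word. IP encapsulation omits the first 4 bytes.
 */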
1002
1003 /* Queue the packet to IP for output: tunnel socket lock must be held */
1004 static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
1005 {
1006 int err;
1007
1008 skb->ignore_df = 1;
1009 skb_dst_drop(skb);
1010 #if IS_ENABLED(CONFIG_IPV6)
1011 if (l2tp_sk_is_v6(tunnel->sock))
1012 err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1013 else
1014 #endif
1015 err = ip_queue_xmit(tunnel->sock, skb, fl);
1016
1017 return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1018 }
1019
1020 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
1021 {
1022 struct l2tp_tunnel *tunnel = session->tunnel;
1023 unsigned int data_len = skb->len;
1024 struct sock *sk = tunnel->sock;
1025 int headroom, uhlen, udp_len;
1026 int ret = NET_XMIT_SUCCESS;
1027 struct inet_sock *inet;
1028 struct udphdr *uh;
1029
1030 /* Check that there's enough headroom in the skb to insert IP,
1031 * UDP and L2TP headers. If not enough, expand it to
1032 * make room. Adjust truesize.
1033 */
1034 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
1035 headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
1036 if (skb_cow_head(skb, headroom)) {
1037 kfree_skb(skb);
1038 return NET_XMIT_DROP;
1039 }
1040
1041 /* Setup L2TP header */
1042 if (tunnel->version == L2TP_HDR_VER_2)
1043 l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
1044 else
1045 l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
1046
1047 /* Reset skb netfilter state */
1048 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1049 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
1050 nf_reset_ct(skb);
1051
1052 /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
1053 * nested socket calls on the same lockdep socket class. This can
1054 * happen when data from a user socket is routed over l2tp, which uses
1055 * another userspace socket.
1056 */
1057 spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
1058
1059 if (sock_owned_by_user(sk)) {
1060 kfree_skb(skb);
1061 ret = NET_XMIT_DROP;
1062 goto out_unlock;
1063 }
1064
1065 /* Userspace may change the connection status of a userspace-provided
1066 * socket at run time: we must check it under the socket lock.
1067 */
1068 if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1069 kfree_skb(skb);
1070 ret = NET_XMIT_DROP;
1071 goto out_unlock;
1072 }
1073
1074 /* Report transmitted length before we add encap header, which keeps
1075 * statistics consistent for both UDP and IP encap tx/rx paths.
1076 */
1077 *len = skb->len;
1078
1079 inet = inet_sk(sk);
1080 switch (tunnel->encap) {
1081 case L2TP_ENCAPTYPE_UDP:
1082 /* Setup UDP header */
1083 __skb_push(skb, sizeof(*uh));
1084 skb_reset_transport_header(skb);
1085 uh = udp_hdr(skb);
1086 uh->source = inet->inet_sport;
1087 uh->dest = inet->inet_dport;
1088 udp_len = uhlen + session->hdr_len + data_len;
1089 uh->len = htons(udp_len);
1090
1091 /* Calculate UDP checksum if configured to do so */
1092 #if IS_ENABLED(CONFIG_IPV6)
1093 if (l2tp_sk_is_v6(sk))
1094 udp6_set_csum(udp_get_no_check6_tx(sk),
1095 skb, &inet6_sk(sk)->saddr,
1096 &sk->sk_v6_daddr, udp_len);
1097 else
1098 #endif
1099 udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1100 inet->inet_daddr, udp_len);
1101 break;
1102
1103 case L2TP_ENCAPTYPE_IP:
1104 break;
1105 }
1106
1107 ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
1108
1109 out_unlock:
1110 spin_unlock(&sk->sk_lock.slock);
1111
1112 return ret;
1113 }
1114
1115 /* If caller requires the skb to have a ppp header, the header must be
1116 * inserted in the skb data before calling this function.
1117 */
1118 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1119 {
1120 unsigned int len = 0;
1121 int ret;
1122
1123 ret = l2tp_xmit_core(session, skb, &len);
1124 if (ret == NET_XMIT_SUCCESS) {
1125 atomic_long_inc(&session->tunnel->stats.tx_packets);
1126 atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1127 atomic_long_inc(&session->stats.tx_packets);
1128 atomic_long_add(len, &session->stats.tx_bytes);
1129 } else {
1130 atomic_long_inc(&session->tunnel->stats.tx_errors);
1131 atomic_long_inc(&session->stats.tx_errors);
1132 }
1133 return ret;
1134 }
1135 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
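
/* Sketch of how a pseudowire transmit path hands a frame to the core
 * (hypothetical caller). Any inner header such as PPP must already be in
 * place at skb->data, and the skb is consumed whatever the outcome:
 *
 *	ret = l2tp_xmit_skb(session, skb);
 *	if (ret != NET_XMIT_SUCCESS)
 *		... count the drop, but do not free the skb ...
 */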
1136
1137 /*****************************************************************************
1138 * Tunnel and session create/destroy.
1139 *****************************************************************************/
1140
1141 /* Tunnel socket destruct hook.
1142 * The tunnel context is deleted only when all session sockets have been
1143 * closed.
1144 */
1145 static void l2tp_tunnel_destruct(struct sock *sk)
1146 {
1147 struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1148
1149 if (!tunnel)
1150 goto end;
1151
1152 /* Disable udp encapsulation */
1153 switch (tunnel->encap) {
1154 case L2TP_ENCAPTYPE_UDP:
1155 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1156 WRITE_ONCE(udp_sk(sk)->encap_type, 0);
1157 udp_sk(sk)->encap_rcv = NULL;
1158 udp_sk(sk)->encap_destroy = NULL;
1159 break;
1160 case L2TP_ENCAPTYPE_IP:
1161 break;
1162 }
1163
1164 /* Remove hooks into tunnel socket */
1165 write_lock_bh(&sk->sk_callback_lock);
1166 sk->sk_destruct = tunnel->old_sk_destruct;
1167 sk->sk_user_data = NULL;
1168 write_unlock_bh(&sk->sk_callback_lock);
1169
1170 /* Call the original destructor */
1171 if (sk->sk_destruct)
1172 (*sk->sk_destruct)(sk);
1173
1174 kfree_rcu(tunnel, rcu);
1175 end:
1176 return;
1177 }
1178
1179 /* Remove an l2tp session from l2tp_core's hash lists. */
1180 static void l2tp_session_unhash(struct l2tp_session *session)
1181 {
1182 struct l2tp_tunnel *tunnel = session->tunnel;
1183
1184 /* Remove the session from core hashes */
1185 if (tunnel) {
1186 /* Remove from the per-tunnel hash */
1187 spin_lock_bh(&tunnel->hlist_lock);
1188 hlist_del_init_rcu(&session->hlist);
1189 spin_unlock_bh(&tunnel->hlist_lock);
1190
1191 /* For L2TPv3 we have a per-net hash: remove from there, too */
1192 if (tunnel->version != L2TP_HDR_VER_2) {
1193 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1194
1195 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1196 hlist_del_init_rcu(&session->global_hlist);
1197 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1198 }
1199
1200 synchronize_rcu();
1201 }
1202 }
1203
1204 /* When the tunnel is closed, all the attached sessions need to go too.
1205 */
1206 static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1207 {
1208 struct l2tp_session *session;
1209 int hash;
1210
1211 spin_lock_bh(&tunnel->hlist_lock);
1212 tunnel->acpt_newsess = false;
1213 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1214 again:
1215 hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
1216 hlist_del_init_rcu(&session->hlist);
1217
1218 spin_unlock_bh(&tunnel->hlist_lock);
1219 l2tp_session_delete(session);
1220 spin_lock_bh(&tunnel->hlist_lock);
1221
1222 /* Now restart from the beginning of this hash
1223 * chain. We always remove a session from the
1224 * list so we are guaranteed to make forward
1225 * progress.
1226 */
1227 goto again;
1228 }
1229 }
1230 spin_unlock_bh(&tunnel->hlist_lock);
1231 }
1232
1233 /* Tunnel socket destroy hook for UDP encapsulation */
1234 static void l2tp_udp_encap_destroy(struct sock *sk)
1235 {
1236 struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1237
1238 if (tunnel)
1239 l2tp_tunnel_delete(tunnel);
1240 }
1241
1242 static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1243 {
1244 struct l2tp_net *pn = l2tp_pernet(net);
1245
1246 spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1247 idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1248 spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1249 }
1250
1251 /* Workqueue tunnel deletion function */
1252 static void l2tp_tunnel_del_work(struct work_struct *work)
1253 {
1254 struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1255 del_work);
1256 struct sock *sk = tunnel->sock;
1257 struct socket *sock = sk->sk_socket;
1258
1259 l2tp_tunnel_closeall(tunnel);
1260
1261 /* If the tunnel socket was created within the kernel, use
1262 * the sk API to release it here.
1263 */
1264 if (tunnel->fd < 0) {
1265 if (sock) {
1266 kernel_sock_shutdown(sock, SHUT_RDWR);
1267 sock_release(sock);
1268 }
1269 }
1270
1271 l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
1272 /* drop initial ref */
1273 l2tp_tunnel_dec_refcount(tunnel);
1274
1275 /* drop workqueue ref */
1276 l2tp_tunnel_dec_refcount(tunnel);
1277 }
1278
1279 /* Create a socket for the tunnel, if one isn't set up by
1280 * userspace. This is used for static tunnels where there is no
1281 * managing L2TP daemon.
1282 *
1283 * Since we don't want these sockets to keep a namespace alive by
1284 * themselves, we drop the socket's namespace refcount after creation.
1285 * These sockets are freed when the namespace exits using the pernet
1286 * exit hook.
1287 */
1288 static int l2tp_tunnel_sock_create(struct net *net,
1289 u32 tunnel_id,
1290 u32 peer_tunnel_id,
1291 struct l2tp_tunnel_cfg *cfg,
1292 struct socket **sockp)
1293 {
1294 int err = -EINVAL;
1295 struct socket *sock = NULL;
1296 struct udp_port_cfg udp_conf;
1297
1298 switch (cfg->encap) {
1299 case L2TP_ENCAPTYPE_UDP:
1300 memset(&udp_conf, 0, sizeof(udp_conf));
1301
1302 #if IS_ENABLED(CONFIG_IPV6)
1303 if (cfg->local_ip6 && cfg->peer_ip6) {
1304 udp_conf.family = AF_INET6;
1305 memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1306 sizeof(udp_conf.local_ip6));
1307 memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1308 sizeof(udp_conf.peer_ip6));
1309 udp_conf.use_udp6_tx_checksums =
1310 !cfg->udp6_zero_tx_checksums;
1311 udp_conf.use_udp6_rx_checksums =
1312 !cfg->udp6_zero_rx_checksums;
1313 } else
1314 #endif
1315 {
1316 udp_conf.family = AF_INET;
1317 udp_conf.local_ip = cfg->local_ip;
1318 udp_conf.peer_ip = cfg->peer_ip;
1319 udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1320 }
1321
1322 udp_conf.local_udp_port = htons(cfg->local_udp_port);
1323 udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1324
1325 err = udp_sock_create(net, &udp_conf, &sock);
1326 if (err < 0)
1327 goto out;
1328
1329 break;
1330
1331 case L2TP_ENCAPTYPE_IP:
1332 #if IS_ENABLED(CONFIG_IPV6)
1333 if (cfg->local_ip6 && cfg->peer_ip6) {
1334 struct sockaddr_l2tpip6 ip6_addr = {0};
1335
1336 err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1337 IPPROTO_L2TP, &sock);
1338 if (err < 0)
1339 goto out;
1340
1341 ip6_addr.l2tp_family = AF_INET6;
1342 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1343 sizeof(ip6_addr.l2tp_addr));
1344 ip6_addr.l2tp_conn_id = tunnel_id;
1345 err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
1346 sizeof(ip6_addr));
1347 if (err < 0)
1348 goto out;
1349
1350 ip6_addr.l2tp_family = AF_INET6;
1351 memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1352 sizeof(ip6_addr.l2tp_addr));
1353 ip6_addr.l2tp_conn_id = peer_tunnel_id;
1354 err = kernel_connect(sock,
1355 (struct sockaddr *)&ip6_addr,
1356 sizeof(ip6_addr), 0);
1357 if (err < 0)
1358 goto out;
1359 } else
1360 #endif
1361 {
1362 struct sockaddr_l2tpip ip_addr = {0};
1363
1364 err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1365 IPPROTO_L2TP, &sock);
1366 if (err < 0)
1367 goto out;
1368
1369 ip_addr.l2tp_family = AF_INET;
1370 ip_addr.l2tp_addr = cfg->local_ip;
1371 ip_addr.l2tp_conn_id = tunnel_id;
1372 err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
1373 sizeof(ip_addr));
1374 if (err < 0)
1375 goto out;
1376
1377 ip_addr.l2tp_family = AF_INET;
1378 ip_addr.l2tp_addr = cfg->peer_ip;
1379 ip_addr.l2tp_conn_id = peer_tunnel_id;
1380 err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
1381 sizeof(ip_addr), 0);
1382 if (err < 0)
1383 goto out;
1384 }
1385 break;
1386
1387 default:
1388 goto out;
1389 }
1390
1391 out:
1392 *sockp = sock;
1393 if (err < 0 && sock) {
1394 kernel_sock_shutdown(sock, SHUT_RDWR);
1395 sock_release(sock);
1396 *sockp = NULL;
1397 }
1398
1399 return err;
1400 }
1401
1402 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1403 struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1404 {
1405 struct l2tp_tunnel *tunnel = NULL;
1406 int err;
1407 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1408
1409 if (cfg)
1410 encap = cfg->encap;
1411
1412 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1413 if (!tunnel) {
1414 err = -ENOMEM;
1415 goto err;
1416 }
1417
1418 tunnel->version = version;
1419 tunnel->tunnel_id = tunnel_id;
1420 tunnel->peer_tunnel_id = peer_tunnel_id;
1421
1422 tunnel->magic = L2TP_TUNNEL_MAGIC;
1423 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1424 spin_lock_init(&tunnel->hlist_lock);
1425 tunnel->acpt_newsess = true;
1426
1427 tunnel->encap = encap;
1428
1429 refcount_set(&tunnel->ref_count, 1);
1430 tunnel->fd = fd;
1431
1432 /* Init delete workqueue struct */
1433 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1434
1435 INIT_LIST_HEAD(&tunnel->list);
1436
1437 err = 0;
1438 err:
1439 if (tunnelp)
1440 *tunnelp = tunnel;
1441
1442 return err;
1443 }
1444 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1445
1446 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1447 enum l2tp_encap_type encap)
1448 {
1449 if (!net_eq(sock_net(sk), net))
1450 return -EINVAL;
1451
1452 if (sk->sk_type != SOCK_DGRAM)
1453 return -EPROTONOSUPPORT;
1454
1455 if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1456 return -EPROTONOSUPPORT;
1457
1458 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1459 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1460 return -EPROTONOSUPPORT;
1461
1462 if (sk->sk_user_data)
1463 return -EBUSY;
1464
1465 return 0;
1466 }
1467
1468 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1469 struct l2tp_tunnel_cfg *cfg)
1470 {
1471 struct l2tp_net *pn = l2tp_pernet(net);
1472 u32 tunnel_id = tunnel->tunnel_id;
1473 struct socket *sock;
1474 struct sock *sk;
1475 int ret;
1476
1477 spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1478 ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1479 GFP_ATOMIC);
1480 spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1481 if (ret)
1482 return ret == -ENOSPC ? -EEXIST : ret;
1483
1484 if (tunnel->fd < 0) {
1485 ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1486 tunnel->peer_tunnel_id, cfg,
1487 &sock);
1488 if (ret < 0)
1489 goto err;
1490 } else {
1491 sock = sockfd_lookup(tunnel->fd, &ret);
1492 if (!sock)
1493 goto err;
1494 }
1495
1496 sk = sock->sk;
1497 lock_sock(sk);
1498 write_lock_bh(&sk->sk_callback_lock);
1499 ret = l2tp_validate_socket(sk, net, tunnel->encap);
1500 if (ret < 0)
1501 goto err_inval_sock;
1502 rcu_assign_sk_user_data(sk, tunnel);
1503 write_unlock_bh(&sk->sk_callback_lock);
1504
1505 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1506 struct udp_tunnel_sock_cfg udp_cfg = {
1507 .sk_user_data = tunnel,
1508 .encap_type = UDP_ENCAP_L2TPINUDP,
1509 .encap_rcv = l2tp_udp_encap_recv,
1510 .encap_destroy = l2tp_udp_encap_destroy,
1511 };
1512
1513 setup_udp_tunnel_sock(net, sock, &udp_cfg);
1514 }
1515
1516 tunnel->old_sk_destruct = sk->sk_destruct;
1517 sk->sk_destruct = &l2tp_tunnel_destruct;
1518 sk->sk_allocation = GFP_ATOMIC;
1519 release_sock(sk);
1520
1521 sock_hold(sk);
1522 tunnel->sock = sk;
1523 tunnel->l2tp_net = net;
1524
1525 spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1526 idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1527 spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1528
1529 trace_register_tunnel(tunnel);
1530
1531 if (tunnel->fd >= 0)
1532 sockfd_put(sock);
1533
1534 return 0;
1535
1536 err_inval_sock:
1537 write_unlock_bh(&sk->sk_callback_lock);
1538 release_sock(sk);
1539
1540 if (tunnel->fd < 0)
1541 sock_release(sock);
1542 else
1543 sockfd_put(sock);
1544 err:
1545 l2tp_tunnel_remove(net, tunnel);
1546 return ret;
1547 }
1548 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
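
/* Tunnel setup is split in two: l2tp_tunnel_create() allocates the context
 * and l2tp_tunnel_register() attaches it to a socket and publishes it in the
 * per-net IDR. A rough sketch of a caller such as the netlink handler:
 *
 *	err = l2tp_tunnel_create(fd, version, tunnel_id, peer_tunnel_id,
 *				 &cfg, &tunnel);
 *	if (err < 0)
 *		return err;
 *
 *	err = l2tp_tunnel_register(tunnel, net, &cfg);
 *	if (err < 0)
 *		... free the unregistered tunnel and bail out ...
 */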
1549
1550 /* This function is used by the netlink TUNNEL_DELETE command.
1551 */
1552 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1553 {
1554 if (!test_and_set_bit(0, &tunnel->dead)) {
1555 trace_delete_tunnel(tunnel);
1556 l2tp_tunnel_inc_refcount(tunnel);
1557 queue_work(l2tp_wq, &tunnel->del_work);
1558 }
1559 }
1560 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1561
1562 void l2tp_session_delete(struct l2tp_session *session)
1563 {
1564 if (test_and_set_bit(0, &session->dead))
1565 return;
1566
1567 trace_delete_session(session);
1568 l2tp_session_unhash(session);
1569 l2tp_session_queue_purge(session);
1570 if (session->session_close)
1571 (*session->session_close)(session);
1572
1573 l2tp_session_dec_refcount(session);
1574 }
1575 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1576
1577 /* We come here whenever a session's send_seq, cookie_len or
1578 * l2specific_type parameters are set.
1579 */
1580 void l2tp_session_set_header_len(struct l2tp_session *session, int version,
1581 enum l2tp_encap_type encap)
1582 {
1583 if (version == L2TP_HDR_VER_2) {
1584 session->hdr_len = 6;
1585 if (session->send_seq)
1586 session->hdr_len += 4;
1587 } else {
1588 session->hdr_len = 4 + session->cookie_len;
1589 session->hdr_len += l2tp_get_l2specific_len(session);
1590 if (encap == L2TP_ENCAPTYPE_UDP)
1591 session->hdr_len += 4;
1592 }
1593 }
1594 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
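
/* Worked examples of the header length computed above: a v2 session with
 * send_seq enabled uses 6 + 4 = 10 bytes (flags, tunnel ID, session ID plus
 * Ns/Nr); a UDP-encapsulated v3 session with a 4-byte cookie and the default
 * L2-specific sublayer uses 4 + 4 + 4 + 4 = 16 bytes (session ID, cookie,
 * sublayer word, plus the 4-byte UDP flags/version word).
 */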
1595
1596 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1597 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1598 {
1599 struct l2tp_session *session;
1600
1601 session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1602 if (session) {
1603 session->magic = L2TP_SESSION_MAGIC;
1604
1605 session->session_id = session_id;
1606 session->peer_session_id = peer_session_id;
1607 session->nr = 0;
1608 if (tunnel->version == L2TP_HDR_VER_2)
1609 session->nr_max = 0xffff;
1610 else
1611 session->nr_max = 0xffffff;
1612 session->nr_window_size = session->nr_max / 2;
1613 session->nr_oos_count_max = 4;
1614
1615 /* Use NR of first received packet */
1616 session->reorder_skip = 1;
1617
1618 sprintf(&session->name[0], "sess %u/%u",
1619 tunnel->tunnel_id, session->session_id);
1620
1621 skb_queue_head_init(&session->reorder_q);
1622
1623 INIT_HLIST_NODE(&session->hlist);
1624 INIT_HLIST_NODE(&session->global_hlist);
1625
1626 if (cfg) {
1627 session->pwtype = cfg->pw_type;
1628 session->send_seq = cfg->send_seq;
1629 session->recv_seq = cfg->recv_seq;
1630 session->lns_mode = cfg->lns_mode;
1631 session->reorder_timeout = cfg->reorder_timeout;
1632 session->l2specific_type = cfg->l2specific_type;
1633 session->cookie_len = cfg->cookie_len;
1634 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1635 session->peer_cookie_len = cfg->peer_cookie_len;
1636 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1637 }
1638
1639 l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
1640
1641 refcount_set(&session->ref_count, 1);
1642
1643 return session;
1644 }
1645
1646 return ERR_PTR(-ENOMEM);
1647 }
1648 EXPORT_SYMBOL_GPL(l2tp_session_create);
1649
1650 /*****************************************************************************
1651 * Init and cleanup
1652 *****************************************************************************/
1653
1654 static __net_init int l2tp_init_net(struct net *net)
1655 {
1656 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1657 int hash;
1658
1659 idr_init(&pn->l2tp_tunnel_idr);
1660 spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1661
1662 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1663 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1664
1665 spin_lock_init(&pn->l2tp_session_hlist_lock);
1666
1667 return 0;
1668 }
1669
1670 static __net_exit void l2tp_exit_net(struct net *net)
1671 {
1672 struct l2tp_net *pn = l2tp_pernet(net);
1673 struct l2tp_tunnel *tunnel = NULL;
1674 unsigned long tunnel_id, tmp;
1675 int hash;
1676
1677 rcu_read_lock_bh();
1678 idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
1679 if (tunnel)
1680 l2tp_tunnel_delete(tunnel);
1681 }
1682 rcu_read_unlock_bh();
1683
1684 if (l2tp_wq)
1685 flush_workqueue(l2tp_wq);
1686 rcu_barrier();
1687
1688 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1689 WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
1690 idr_destroy(&pn->l2tp_tunnel_idr);
1691 }
1692
1693 static struct pernet_operations l2tp_net_ops = {
1694 .init = l2tp_init_net,
1695 .exit = l2tp_exit_net,
1696 .id = &l2tp_net_id,
1697 .size = sizeof(struct l2tp_net),
1698 };
1699
1700 static int __init l2tp_init(void)
1701 {
1702 int rc = 0;
1703
1704 rc = register_pernet_device(&l2tp_net_ops);
1705 if (rc)
1706 goto out;
1707
1708 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1709 if (!l2tp_wq) {
1710 pr_err("alloc_workqueue failed\n");
1711 unregister_pernet_device(&l2tp_net_ops);
1712 rc = -ENOMEM;
1713 goto out;
1714 }
1715
1716 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1717
1718 out:
1719 return rc;
1720 }
1721
1722 static void __exit l2tp_exit(void)
1723 {
1724 unregister_pernet_device(&l2tp_net_ops);
1725 if (l2tp_wq) {
1726 destroy_workqueue(l2tp_wq);
1727 l2tp_wq = NULL;
1728 }
1729 }
1730
1731 module_init(l2tp_init);
1732 module_exit(l2tp_exit);
1733
1734 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1735 MODULE_DESCRIPTION("L2TP core");
1736 MODULE_LICENSE("GPL");
1737 MODULE_VERSION(L2TP_DRV_VERSION);
1738