/* L2TP core.
 *
 * Core tunnel and session management for the Layer Two Tunneling
 * Protocol (L2TP).  See the MODULE_AUTHOR, MODULE_DESCRIPTION and
 * MODULE_LICENSE declarations at the end of this file.
 */

21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/module.h>
24#include <linux/string.h>
25#include <linux/list.h>
26#include <linux/rculist.h>
27#include <linux/uaccess.h>
28
29#include <linux/kernel.h>
30#include <linux/spinlock.h>
31#include <linux/kthread.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/errno.h>
35#include <linux/jiffies.h>
36
37#include <linux/netdevice.h>
38#include <linux/net.h>
39#include <linux/inetdevice.h>
40#include <linux/skbuff.h>
41#include <linux/init.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/udp.h>
45#include <linux/l2tp.h>
46#include <linux/hash.h>
47#include <linux/sort.h>
48#include <linux/file.h>
49#include <linux/nsproxy.h>
50#include <net/net_namespace.h>
51#include <net/netns/generic.h>
52#include <net/dst.h>
53#include <net/ip.h>
54#include <net/udp.h>
55#include <net/inet_common.h>
56#include <net/xfrm.h>
57#include <net/protocol.h>
58#include <net/inet6_connection_sock.h>
59#include <net/inet_ecn.h>
60#include <net/ip6_route.h>
61#include <net/ip6_checksum.h>
62
63#include <asm/byteorder.h>
64#include <linux/atomic.h>
65
66#include "l2tp_core.h"
67
68#define L2TP_DRV_VERSION "V2.0"
69
70
71#define L2TP_HDRFLAG_T 0x8000
72#define L2TP_HDRFLAG_L 0x4000
73#define L2TP_HDRFLAG_S 0x0800
74#define L2TP_HDRFLAG_O 0x0200
75#define L2TP_HDRFLAG_P 0x0100
76
77#define L2TP_HDR_VER_MASK 0x000F
78#define L2TP_HDR_VER_2 0x0002
79#define L2TP_HDR_VER_3 0x0003
80
81
82#define L2TP_SLFLAG_S 0x40000000
83#define L2TP_SL_SEQ_MASK 0x00ffffff
84
85#define L2TP_HDR_SIZE_SEQ 10
86#define L2TP_HDR_SIZE_NOSEQ 6
87
88
89#define L2TP_DEFAULT_DEBUG_FLAGS 0
90
91
92
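/* Private per-packet data stored in the skb control block.  The cb area is
 * shared with the IP layer, so our data starts after struct inet_skb_parm
 * (see the L2TP_SKB_CB() accessor below).
 */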
93struct l2tp_skb_cb {
94 u32 ns;
95 u16 has_seq;
96 u16 length;
97 unsigned long expires;
98};
99
#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &(skb)->cb[sizeof(struct inet_skb_parm)])
101
102static atomic_t l2tp_tunnel_count;
103static atomic_t l2tp_session_count;
104static struct workqueue_struct *l2tp_wq;
105
106
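/* Per network namespace data: the list of all tunnels and the global
 * session hash table used for L2TPv3 session id lookups, each protected
 * by its own lock.
 */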
107static unsigned int l2tp_net_id;
108struct l2tp_net {
109 struct list_head l2tp_tunnel_list;
110 spinlock_t l2tp_tunnel_list_lock;
111 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
112 spinlock_t l2tp_session_hlist_lock;
113};
114
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117
118static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
119{
120 return sk->sk_user_data;
121}
122
123static inline struct l2tp_net *l2tp_pernet(struct net *net)
124{
125 BUG_ON(!net);
126
127 return net_generic(net, l2tp_net_id);
128}
129
130
131
132
133static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
134{
135 atomic_inc(&tunnel->ref_count);
136}
137
138static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
139{
140 if (atomic_dec_and_test(&tunnel->ref_count))
141 l2tp_tunnel_free(tunnel);
142}
143#ifdef L2TP_REFCNT_DEBUG
144#define l2tp_tunnel_inc_refcount(_t) \
145do { \
146 pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
147 __func__, __LINE__, (_t)->name, \
148 atomic_read(&_t->ref_count)); \
149 l2tp_tunnel_inc_refcount_1(_t); \
150} while (0)
#define l2tp_tunnel_dec_refcount(_t) \
152do { \
153 pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
154 __func__, __LINE__, (_t)->name, \
155 atomic_read(&_t->ref_count)); \
156 l2tp_tunnel_dec_refcount_1(_t); \
157} while (0)
158#else
159#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
160#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
161#endif
162
163
164
165
166
167
168static inline struct hlist_head *
169l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
170{
171 return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
172
173}
174
175
176
177
178
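/* Lookup the tunnel socket, possibly involving the fs code if the socket is
 * owned by userspace.  The caller must release it with l2tp_tunnel_sock_put()
 * when done.
 */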
179struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
180{
181 int err = 0;
182 struct socket *sock = NULL;
183 struct sock *sk = NULL;
184
185 if (!tunnel)
186 goto out;
187
188 if (tunnel->fd >= 0) {
189
190
191
192
193 sock = sockfd_lookup(tunnel->fd, &err);
194 if (sock)
195 sk = sock->sk;
196 } else {
197
198 sk = tunnel->sock;
199 sock_hold(sk);
200 }
201
202out:
203 return sk;
204}
205EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
206
207
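/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup(). */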
208void l2tp_tunnel_sock_put(struct sock *sk)
209{
210 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
211 if (tunnel) {
212 if (tunnel->fd >= 0) {
213
214 sockfd_put(sk->sk_socket);
215 }
216 sock_put(sk);
217 }
218 sock_put(sk);
219}
220EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
221
222
223
224static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
225{
226 struct l2tp_net *pn = l2tp_pernet(net);
227 struct hlist_head *session_list =
228 l2tp_session_id_hash_2(pn, session_id);
229 struct l2tp_session *session;
230
231 rcu_read_lock_bh();
232 hlist_for_each_entry_rcu(session, session_list, global_hlist) {
233 if (session->session_id == session_id) {
234 rcu_read_unlock_bh();
235 return session;
236 }
237 }
238 rcu_read_unlock_bh();
239
240 return NULL;
241}
242
243
244
245
246
247
248
249static inline struct hlist_head *
250l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
251{
252 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
253}
254
255
256
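/* Lookup a session by id.  If a tunnel is given, search only that tunnel's
 * hash list; otherwise fall back to the per-net list used by L2TPv3
 * sessions.  No reference is taken on the returned session.
 */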
257struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
258{
259 struct hlist_head *session_list;
260 struct l2tp_session *session;
261
262
263
264
265
266 if (tunnel == NULL)
267 return l2tp_session_find_2(net, session_id);
268
269 session_list = l2tp_session_id_hash(tunnel, session_id);
270 read_lock_bh(&tunnel->hlist_lock);
271 hlist_for_each_entry(session, session_list, hlist) {
272 if (session->session_id == session_id) {
273 read_unlock_bh(&tunnel->hlist_lock);
274 return session;
275 }
276 }
277 read_unlock_bh(&tunnel->hlist_lock);
278
279 return NULL;
280}
281EXPORT_SYMBOL_GPL(l2tp_session_find);
282
283
284
285
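/* Like l2tp_session_find() but also takes a reference on the returned
 * session and, if do_ref is set, calls the session's ref() hook.  The
 * caller is responsible for dropping these references again.
 */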
286struct l2tp_session *l2tp_session_get(struct net *net,
287 struct l2tp_tunnel *tunnel,
288 u32 session_id, bool do_ref)
289{
290 struct hlist_head *session_list;
291 struct l2tp_session *session;
292
293 if (!tunnel) {
294 struct l2tp_net *pn = l2tp_pernet(net);
295
296 session_list = l2tp_session_id_hash_2(pn, session_id);
297
298 rcu_read_lock_bh();
299 hlist_for_each_entry_rcu(session, session_list, global_hlist) {
300 if (session->session_id == session_id) {
301 l2tp_session_inc_refcount(session);
302 if (do_ref && session->ref)
303 session->ref(session);
304 rcu_read_unlock_bh();
305
306 return session;
307 }
308 }
309 rcu_read_unlock_bh();
310
311 return NULL;
312 }
313
314 session_list = l2tp_session_id_hash(tunnel, session_id);
315 read_lock_bh(&tunnel->hlist_lock);
316 hlist_for_each_entry(session, session_list, hlist) {
317 if (session->session_id == session_id) {
318 l2tp_session_inc_refcount(session);
319 if (do_ref && session->ref)
320 session->ref(session);
321 read_unlock_bh(&tunnel->hlist_lock);
322
323 return session;
324 }
325 }
326 read_unlock_bh(&tunnel->hlist_lock);
327
328 return NULL;
329}
330EXPORT_SYMBOL_GPL(l2tp_session_get);
331
332struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
333{
334 int hash;
335 struct l2tp_session *session;
336 int count = 0;
337
338 read_lock_bh(&tunnel->hlist_lock);
339 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
340 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
341 if (++count > nth) {
342 read_unlock_bh(&tunnel->hlist_lock);
343 return session;
344 }
345 }
346 }
347
348 read_unlock_bh(&tunnel->hlist_lock);
349
350 return NULL;
351}
352EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
353
354
355
356
357struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
358{
359 struct l2tp_net *pn = l2tp_pernet(net);
360 int hash;
361 struct l2tp_session *session;
362
363 rcu_read_lock_bh();
364 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
365 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
366 if (!strcmp(session->ifname, ifname)) {
367 rcu_read_unlock_bh();
368 return session;
369 }
370 }
371 }
372
373 rcu_read_unlock_bh();
374
375 return NULL;
376}
377EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
378
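/* Add a session to its tunnel's hash list and, for L2TPv3, to the per-net
 * session list.  Takes a reference on the tunnel and its socket.  Fails
 * with -ENODEV if the tunnel is no longer accepting new sessions and with
 * -EEXIST if a session with the same id already exists.
 */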
379static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
380 struct l2tp_session *session)
381{
382 struct l2tp_session *session_walk;
383 struct hlist_head *g_head;
384 struct hlist_head *head;
385 struct l2tp_net *pn;
386 int err;
387
388 head = l2tp_session_id_hash(tunnel, session->session_id);
389
390 write_lock_bh(&tunnel->hlist_lock);
391 if (!tunnel->acpt_newsess) {
392 err = -ENODEV;
393 goto err_tlock;
394 }
395
396 hlist_for_each_entry(session_walk, head, hlist)
397 if (session_walk->session_id == session->session_id) {
398 err = -EEXIST;
399 goto err_tlock;
400 }
401
402 if (tunnel->version == L2TP_HDR_VER_3) {
403 pn = l2tp_pernet(tunnel->l2tp_net);
 g_head = l2tp_session_id_hash_2(pn, session->session_id);
406
407 spin_lock_bh(&pn->l2tp_session_hlist_lock);
408
409 hlist_for_each_entry(session_walk, g_head, global_hlist)
410 if (session_walk->session_id == session->session_id) {
411 err = -EEXIST;
412 goto err_tlock_pnlock;
413 }
414
415 l2tp_tunnel_inc_refcount(tunnel);
416 sock_hold(tunnel->sock);
417 hlist_add_head_rcu(&session->global_hlist, g_head);
418
419 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
420 } else {
421 l2tp_tunnel_inc_refcount(tunnel);
422 sock_hold(tunnel->sock);
423 }
424
425 hlist_add_head(&session->hlist, head);
426 write_unlock_bh(&tunnel->hlist_lock);
427
428 return 0;
429
430err_tlock_pnlock:
431 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
432err_tlock:
433 write_unlock_bh(&tunnel->hlist_lock);
434
435 return err;
436}
437
438
439
440struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
441{
442 struct l2tp_tunnel *tunnel;
443 struct l2tp_net *pn = l2tp_pernet(net);
444
445 rcu_read_lock_bh();
446 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
447 if (tunnel->tunnel_id == tunnel_id) {
448 rcu_read_unlock_bh();
449 return tunnel;
450 }
451 }
452 rcu_read_unlock_bh();
453
454 return NULL;
455}
456EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
457
458struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
459{
460 struct l2tp_net *pn = l2tp_pernet(net);
461 struct l2tp_tunnel *tunnel;
462 int count = 0;
463
464 rcu_read_lock_bh();
465 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
466 if (++count > nth) {
467 rcu_read_unlock_bh();
468 return tunnel;
469 }
470 }
471
472 rcu_read_unlock_bh();
473
474 return NULL;
475}
476EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
477
478
479
480
481
482
483
484
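/* Queue a received data frame on the session's reorder queue, keeping the
 * queue ordered by ns (the frame's send sequence number).
 */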
485static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
486{
487 struct sk_buff *skbp;
488 struct sk_buff *tmp;
489 u32 ns = L2TP_SKB_CB(skb)->ns;
490
491 spin_lock_bh(&session->reorder_q.lock);
492 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
493 if (L2TP_SKB_CB(skbp)->ns > ns) {
494 __skb_queue_before(&session->reorder_q, skbp, skb);
495 l2tp_dbg(session, L2TP_MSG_SEQ,
496 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
497 session->name, ns, L2TP_SKB_CB(skbp)->ns,
498 skb_queue_len(&session->reorder_q));
499 atomic_long_inc(&session->stats.rx_oos_packets);
500 goto out;
501 }
502 }
503
504 __skb_queue_tail(&session->reorder_q, skb);
505
506out:
507 spin_unlock_bh(&session->reorder_q.lock);
508}
509
510
511
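/* Dequeue a single skb: update tunnel and session stats, advance the
 * expected sequence number (nr) and hand the payload to the session's
 * recv_skb handler.
 */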
512static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
513{
514 struct l2tp_tunnel *tunnel = session->tunnel;
515 int length = L2TP_SKB_CB(skb)->length;
516
517
518
519
520 skb_orphan(skb);
521
522 atomic_long_inc(&tunnel->stats.rx_packets);
523 atomic_long_add(length, &tunnel->stats.rx_bytes);
524 atomic_long_inc(&session->stats.rx_packets);
525 atomic_long_add(length, &session->stats.rx_bytes);
526
527 if (L2TP_SKB_CB(skb)->has_seq) {
528
529 session->nr++;
530 session->nr &= session->nr_max;
531
 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
533 session->name, session->nr);
534 }
535
536
537 if (session->recv_skb != NULL)
538 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
539 else
540 kfree_skb(skb);
541
542 if (session->deref)
543 (*session->deref)(session);
544}
545
546
547
548
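/* Dequeue as many in-sequence frames from the reorder queue as possible.
 * Frames that have been queued longer than the reorder timeout are
 * discarded, and the expected sequence number is then resynchronised to
 * the next queued frame.
 */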
549static void l2tp_recv_dequeue(struct l2tp_session *session)
550{
551 struct sk_buff *skb;
552 struct sk_buff *tmp;
553
554
555
556
557
558start:
559 spin_lock_bh(&session->reorder_q.lock);
560 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
561 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
562 atomic_long_inc(&session->stats.rx_seq_discards);
563 atomic_long_inc(&session->stats.rx_errors);
564 l2tp_dbg(session, L2TP_MSG_SEQ,
565 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
566 session->name, L2TP_SKB_CB(skb)->ns,
567 L2TP_SKB_CB(skb)->length, session->nr,
568 skb_queue_len(&session->reorder_q));
569 session->reorder_skip = 1;
570 __skb_unlink(skb, &session->reorder_q);
571 kfree_skb(skb);
572 if (session->deref)
573 (*session->deref)(session);
574 continue;
575 }
576
577 if (L2TP_SKB_CB(skb)->has_seq) {
578 if (session->reorder_skip) {
579 l2tp_dbg(session, L2TP_MSG_SEQ,
580 "%s: advancing nr to next pkt: %u -> %u",
581 session->name, session->nr,
582 L2TP_SKB_CB(skb)->ns);
583 session->reorder_skip = 0;
584 session->nr = L2TP_SKB_CB(skb)->ns;
585 }
586 if (L2TP_SKB_CB(skb)->ns != session->nr) {
587 l2tp_dbg(session, L2TP_MSG_SEQ,
588 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
589 session->name, L2TP_SKB_CB(skb)->ns,
590 L2TP_SKB_CB(skb)->length, session->nr,
591 skb_queue_len(&session->reorder_q));
592 goto out;
593 }
594 }
595 __skb_unlink(skb, &session->reorder_q);
596
597
598
599
600 spin_unlock_bh(&session->reorder_q.lock);
601 l2tp_recv_dequeue_skb(session, skb);
602 goto start;
603 }
604
605out:
606 spin_unlock_bh(&session->reorder_q.lock);
607}
608
609static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
610{
611 u32 nws;
612
613 if (nr >= session->nr)
614 nws = nr - session->nr;
615 else
616 nws = (session->nr_max + 1) - (session->nr - nr);
617
618 return nws < session->nr_window_size;
619}
620
621
622
623
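/* Handle a received data frame that carries sequence numbers: drop frames
 * outside the receive window, queue everything when a reorder timeout is
 * configured, otherwise queue only in-sequence frames and count
 * out-of-sequence ones so the sequence can be reset after too many.
 * Returns 0 if the frame was queued, 1 if it should be discarded.
 */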
624static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
625{
626 if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
627
628
629
630 l2tp_dbg(session, L2TP_MSG_SEQ,
631 "%s: pkt %u len %d discarded, outside window, nr=%u\n",
632 session->name, L2TP_SKB_CB(skb)->ns,
633 L2TP_SKB_CB(skb)->length, session->nr);
634 goto discard;
635 }
636
637 if (session->reorder_timeout != 0) {
638
639
640
641 l2tp_recv_queue_skb(session, skb);
642 goto out;
643 }
644
645
646
647
648
649
650 if (L2TP_SKB_CB(skb)->ns == session->nr) {
651 skb_queue_tail(&session->reorder_q, skb);
652 } else {
653 u32 nr_oos = L2TP_SKB_CB(skb)->ns;
654 u32 nr_next = (session->nr_oos + 1) & session->nr_max;
655
656 if (nr_oos == nr_next)
657 session->nr_oos_count++;
658 else
659 session->nr_oos_count = 0;
660
661 session->nr_oos = nr_oos;
662 if (session->nr_oos_count > session->nr_oos_count_max) {
663 session->reorder_skip = 1;
664 l2tp_dbg(session, L2TP_MSG_SEQ,
665 "%s: %d oos packets received. Resetting sequence numbers\n",
666 session->name, session->nr_oos_count);
667 }
668 if (!session->reorder_skip) {
669 atomic_long_inc(&session->stats.rx_seq_discards);
670 l2tp_dbg(session, L2TP_MSG_SEQ,
671 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
672 session->name, L2TP_SKB_CB(skb)->ns,
673 L2TP_SKB_CB(skb)->length, session->nr,
674 skb_queue_len(&session->reorder_q));
675 goto discard;
676 }
677 skb_queue_tail(&session->reorder_q, skb);
678 }
679
680out:
681 return 0;
682
683discard:
684 return 1;
685}
686
749
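/* Core receive processing for L2TPv2 and L2TPv3 data frames: verify the
 * optional cookie, extract ns/nr sequence numbers when present, apply the
 * send/recv sequence negotiation rules, strip the remaining header and
 * either queue the frame for reordering or deliver it directly.
 */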
750void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
751 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
752 int length, int (*payload_hook)(struct sk_buff *skb))
753{
754 struct l2tp_tunnel *tunnel = session->tunnel;
755 int offset;
756 u32 ns, nr;
757
758
759 if (session->peer_cookie_len > 0) {
760 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
761 l2tp_info(tunnel, L2TP_MSG_DATA,
762 "%s: cookie mismatch (%u/%u). Discarding.\n",
763 tunnel->name, tunnel->tunnel_id,
764 session->session_id);
765 atomic_long_inc(&session->stats.rx_cookie_discards);
766 goto discard;
767 }
768 ptr += session->peer_cookie_len;
769 }
770
771
772
773
774
775
776
777
778 ns = nr = 0;
779 L2TP_SKB_CB(skb)->has_seq = 0;
780 if (tunnel->version == L2TP_HDR_VER_2) {
781 if (hdrflags & L2TP_HDRFLAG_S) {
782 ns = ntohs(*(__be16 *) ptr);
783 ptr += 2;
784 nr = ntohs(*(__be16 *) ptr);
785 ptr += 2;
786
787
788 L2TP_SKB_CB(skb)->ns = ns;
789 L2TP_SKB_CB(skb)->has_seq = 1;
790
791 l2tp_dbg(session, L2TP_MSG_SEQ,
792 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
793 session->name, ns, nr, session->nr);
794 }
795 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
796 u32 l2h = ntohl(*(__be32 *) ptr);
797
798 if (l2h & 0x40000000) {
799 ns = l2h & 0x00ffffff;
800
801
802 L2TP_SKB_CB(skb)->ns = ns;
803 L2TP_SKB_CB(skb)->has_seq = 1;
804
805 l2tp_dbg(session, L2TP_MSG_SEQ,
806 "%s: recv data ns=%u, session nr=%u\n",
807 session->name, ns, session->nr);
808 }
809 }
810
811
812 ptr += session->l2specific_len;
813
814 if (L2TP_SKB_CB(skb)->has_seq) {
815
816
817
818
819 if ((!session->lns_mode) && (!session->send_seq)) {
820 l2tp_info(session, L2TP_MSG_SEQ,
821 "%s: requested to enable seq numbers by LNS\n",
822 session->name);
823 session->send_seq = 1;
824 l2tp_session_set_header_len(session, tunnel->version);
825 }
826 } else {
827
828
829
830 if (session->recv_seq) {
831 l2tp_warn(session, L2TP_MSG_SEQ,
832 "%s: recv data has no seq numbers when required. Discarding.\n",
833 session->name);
834 atomic_long_inc(&session->stats.rx_seq_discards);
835 goto discard;
836 }
837
838
839
840
841
842
843 if ((!session->lns_mode) && (session->send_seq)) {
844 l2tp_info(session, L2TP_MSG_SEQ,
845 "%s: requested to disable seq numbers by LNS\n",
846 session->name);
847 session->send_seq = 0;
848 l2tp_session_set_header_len(session, tunnel->version);
849 } else if (session->send_seq) {
850 l2tp_warn(session, L2TP_MSG_SEQ,
851 "%s: recv data has no seq numbers when required. Discarding.\n",
852 session->name);
853 atomic_long_inc(&session->stats.rx_seq_discards);
854 goto discard;
855 }
856 }
857
858
859
860
861
862
863 if (tunnel->version == L2TP_HDR_VER_2) {
864
865 if (hdrflags & L2TP_HDRFLAG_O) {
866 offset = ntohs(*(__be16 *)ptr);
867 ptr += 2 + offset;
868 }
869 } else
870 ptr += session->offset;
871
872 offset = ptr - optr;
873 if (!pskb_may_pull(skb, offset))
874 goto discard;
875
876 __skb_pull(skb, offset);
877
878
879
880
881 if (payload_hook)
882 if ((*payload_hook)(skb))
883 goto discard;
884
885
886
887
888
889 L2TP_SKB_CB(skb)->length = length;
890 L2TP_SKB_CB(skb)->expires = jiffies +
891 (session->reorder_timeout ? session->reorder_timeout : HZ);
892
893
894
895
896 if (L2TP_SKB_CB(skb)->has_seq) {
897 if (l2tp_recv_data_seq(session, skb))
898 goto discard;
899 } else {
900
901
902
903
904 skb_queue_tail(&session->reorder_q, skb);
905 }
906
907
908 l2tp_recv_dequeue(session);
909
910 return;
911
912discard:
913 atomic_long_inc(&session->stats.rx_errors);
914 kfree_skb(skb);
915
916 if (session->deref)
917 (*session->deref)(session);
918}
919EXPORT_SYMBOL(l2tp_recv_common);
920
921
922
923int l2tp_session_queue_purge(struct l2tp_session *session)
924{
925 struct sk_buff *skb = NULL;
926 BUG_ON(!session);
927 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
928 while ((skb = skb_dequeue(&session->reorder_q))) {
929 atomic_long_inc(&session->stats.rx_errors);
930 kfree_skb(skb);
931 if (session->deref)
932 (*session->deref)(session);
933 }
934 return 0;
935}
936EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
937
938
939
940
941
942
943
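/* UDP receive core: parse and verify the L2TP header, find the target
 * session and pass the frame to l2tp_recv_common().  Returns 1 (after
 * restoring the UDP header) if the frame should instead be passed up to
 * userspace as an ordinary UDP datagram.
 */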
944static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
945 int (*payload_hook)(struct sk_buff *skb))
946{
947 struct l2tp_session *session = NULL;
948 unsigned char *ptr, *optr;
949 u16 hdrflags;
950 u32 tunnel_id, session_id;
951 u16 version;
952 int length;
953
954
955
956
957 __skb_pull(skb, sizeof(struct udphdr));
958
959
960 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
961 l2tp_info(tunnel, L2TP_MSG_DATA,
962 "%s: recv short packet (len=%d)\n",
963 tunnel->name, skb->len);
964 goto error;
965 }
966
967
968 if (tunnel->debug & L2TP_MSG_DATA) {
969 length = min(32u, skb->len);
970 if (!pskb_may_pull(skb, length))
971 goto error;
972
973 pr_debug("%s: recv\n", tunnel->name);
974 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
975 }
976
977
978 optr = ptr = skb->data;
979
980
981 hdrflags = ntohs(*(__be16 *) ptr);
982
983
984 version = hdrflags & L2TP_HDR_VER_MASK;
985 if (version != tunnel->version) {
986 l2tp_info(tunnel, L2TP_MSG_DATA,
987 "%s: recv protocol version mismatch: got %d expected %d\n",
988 tunnel->name, version, tunnel->version);
989 goto error;
990 }
991
992
993 length = skb->len;
994
995
996 if (hdrflags & L2TP_HDRFLAG_T) {
997 l2tp_dbg(tunnel, L2TP_MSG_DATA,
998 "%s: recv control packet, len=%d\n",
999 tunnel->name, length);
1000 goto error;
1001 }
1002
1003
1004 ptr += 2;
1005
1006 if (tunnel->version == L2TP_HDR_VER_2) {
1007
1008 if (hdrflags & L2TP_HDRFLAG_L)
1009 ptr += 2;
1010
1011
1012 tunnel_id = ntohs(*(__be16 *) ptr);
1013 ptr += 2;
1014 session_id = ntohs(*(__be16 *) ptr);
1015 ptr += 2;
1016 } else {
1017 ptr += 2;
1018 tunnel_id = tunnel->tunnel_id;
1019 session_id = ntohl(*(__be32 *) ptr);
1020 ptr += 4;
1021 }
1022
1023
1024 session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
1025 if (!session || !session->recv_skb) {
1026 if (session) {
1027 if (session->deref)
1028 session->deref(session);
1029 l2tp_session_dec_refcount(session);
1030 }
1031
1032
1033 l2tp_info(tunnel, L2TP_MSG_DATA,
1034 "%s: no session found (%u/%u). Passing up.\n",
1035 tunnel->name, tunnel_id, session_id);
1036 goto error;
1037 }
1038
1039 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
1040 l2tp_session_dec_refcount(session);
1041
1042 return 0;
1043
1044error:
1045
1046 __skb_push(skb, sizeof(struct udphdr));
1047
1048 return 1;
1049}
1050
1051
1052
1053
1054
1055
1056
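/* UDP encap_rcv callback.  Return codes:
 * 0 : success, the frame was consumed here
 * 1 : not an L2TP data frame for us; pass it on to the UDP socket
 */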
1057int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1058{
1059 struct l2tp_tunnel *tunnel;
1060
1061 tunnel = l2tp_sock_to_tunnel(sk);
1062 if (tunnel == NULL)
1063 goto pass_up;
1064
1065 l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
1066 tunnel->name, skb->len);
1067
1068 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
1069 goto pass_up_put;
1070
1071 sock_put(sk);
1072 return 0;
1073
1074pass_up_put:
1075 sock_put(sk);
1076pass_up:
1077 return 1;
1078}
1079EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1080
1081
1082
1083
1084
1085
1086
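/* Build an L2TPv2 data packet header in buf: flags/version word, peer
 * tunnel and session ids, plus ns/nr when sequence numbers are enabled.
 * Returns the number of bytes written.
 */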
1087static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1088{
1089 struct l2tp_tunnel *tunnel = session->tunnel;
1090 __be16 *bufp = buf;
1091 __be16 *optr = buf;
1092 u16 flags = L2TP_HDR_VER_2;
1093 u32 tunnel_id = tunnel->peer_tunnel_id;
1094 u32 session_id = session->peer_session_id;
1095
1096 if (session->send_seq)
1097 flags |= L2TP_HDRFLAG_S;
1098
1099
1100 *bufp++ = htons(flags);
1101 *bufp++ = htons(tunnel_id);
1102 *bufp++ = htons(session_id);
1103 if (session->send_seq) {
1104 *bufp++ = htons(session->ns);
1105 *bufp++ = 0;
1106 session->ns++;
1107 session->ns &= 0xffff;
1108 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
1109 session->name, session->ns);
1110 }
1111
1112 return bufp - optr;
1113}
1114
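/* Build an L2TPv3 data packet header in buf: for UDP encapsulation a
 * flags/version word and a reserved field, then the peer session id,
 * optional cookie, optional default L2-specific sublayer (carrying ns)
 * and optional offset padding.  Returns the number of bytes written.
 */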
1115static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1116{
1117 struct l2tp_tunnel *tunnel = session->tunnel;
1118 char *bufp = buf;
1119 char *optr = bufp;
1120
1121
1122
1123
1124 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1125 u16 flags = L2TP_HDR_VER_3;
1126 *((__be16 *) bufp) = htons(flags);
1127 bufp += 2;
1128 *((__be16 *) bufp) = 0;
1129 bufp += 2;
1130 }
1131
1132 *((__be32 *) bufp) = htonl(session->peer_session_id);
1133 bufp += 4;
1134 if (session->cookie_len) {
1135 memcpy(bufp, &session->cookie[0], session->cookie_len);
1136 bufp += session->cookie_len;
1137 }
1138 if (session->l2specific_len) {
1139 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1140 u32 l2h = 0;
1141 if (session->send_seq) {
1142 l2h = 0x40000000 | session->ns;
1143 session->ns++;
1144 session->ns &= 0xffffff;
1145 l2tp_dbg(session, L2TP_MSG_SEQ,
1146 "%s: updated ns to %u\n",
1147 session->name, session->ns);
1148 }
1149
1150 *((__be32 *) bufp) = htonl(l2h);
1151 }
1152 bufp += session->l2specific_len;
1153 }
1154 if (session->offset)
1155 bufp += session->offset;
1156
1157 return bufp - optr;
1158}
1159
1160static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1161 struct flowi *fl, size_t data_len)
1162{
1163 struct l2tp_tunnel *tunnel = session->tunnel;
1164 unsigned int len = skb->len;
1165 int error;
1166
1167
1168 if (session->send_seq)
 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
 session->name, data_len, session->ns - 1);
 else
 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
1173 session->name, data_len);
1174
1175 if (session->debug & L2TP_MSG_DATA) {
1176 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1177 unsigned char *datap = skb->data + uhlen;
1178
1179 pr_debug("%s: xmit\n", session->name);
1180 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
1181 datap, min_t(size_t, 32, len - uhlen));
1182 }
1183
1184
1185 skb->ignore_df = 1;
1186#if IS_ENABLED(CONFIG_IPV6)
1187 if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1188 error = inet6_csk_xmit(skb, NULL);
1189 else
1190#endif
1191 error = ip_queue_xmit(skb, fl);
1192
1193
1194 if (error >= 0) {
1195 atomic_long_inc(&tunnel->stats.tx_packets);
1196 atomic_long_add(len, &tunnel->stats.tx_bytes);
1197 atomic_long_inc(&session->stats.tx_packets);
1198 atomic_long_add(len, &session->stats.tx_bytes);
1199 } else {
1200 atomic_long_inc(&tunnel->stats.tx_errors);
1201 atomic_long_inc(&session->stats.tx_errors);
1202 }
1203
1204 return 0;
1205}
1206
1207
1208
1209static void l2tp_sock_wfree(struct sk_buff *skb)
1210{
1211 sock_put(skb->sk);
1212}
1213
1214
1215
1216
1217static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1218{
1219 sock_hold(sk);
1220 skb->sk = sk;
1221 skb->destructor = l2tp_sock_wfree;
1222}
1223
1224
1225
1226
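/* Push the session's L2TP header (and, for UDP encapsulation, the UDP
 * header) onto the frame and transmit it on the tunnel socket.  The frame
 * is dropped if the socket is currently owned by user context.  Returns a
 * NET_XMIT_* code.
 */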
1227int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
1228{
1229 int data_len = skb->len;
1230 struct l2tp_tunnel *tunnel = session->tunnel;
1231 struct sock *sk = tunnel->sock;
1232 struct flowi *fl;
1233 struct udphdr *uh;
1234 struct inet_sock *inet;
1235 int headroom;
1236 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1237 int udp_len;
1238 int ret = NET_XMIT_SUCCESS;
1239
1240
1241
1242
1243
1244 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1245 uhlen + hdr_len;
1246 if (skb_cow_head(skb, headroom)) {
1247 kfree_skb(skb);
1248 return NET_XMIT_DROP;
1249 }
1250
1251 skb_orphan(skb);
1252
1253 session->build_header(session, __skb_push(skb, hdr_len));
1254
1255
1256 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1257 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1258 IPSKB_REROUTED);
1259 nf_reset(skb);
1260
1261 bh_lock_sock(sk);
1262 if (sock_owned_by_user(sk)) {
1263 kfree_skb(skb);
1264 ret = NET_XMIT_DROP;
1265 goto out_unlock;
1266 }
1267
1268
1269 skb_dst_drop(skb);
1270 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
1271
1272 inet = inet_sk(sk);
1273 fl = &inet->cork.fl;
1274 switch (tunnel->encap) {
1275 case L2TP_ENCAPTYPE_UDP:
1276
1277 __skb_push(skb, sizeof(*uh));
1278 skb_reset_transport_header(skb);
1279 uh = udp_hdr(skb);
1280 uh->source = inet->inet_sport;
1281 uh->dest = inet->inet_dport;
1282 udp_len = uhlen + hdr_len + data_len;
1283 uh->len = htons(udp_len);
1284
1285
1286#if IS_ENABLED(CONFIG_IPV6)
1287 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1288 udp6_set_csum(udp_get_no_check6_tx(sk),
1289 skb, &inet6_sk(sk)->saddr,
1290 &sk->sk_v6_daddr, udp_len);
1291 else
1292#endif
1293 udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1294 inet->inet_daddr, udp_len);
1295 break;
1296
1297 case L2TP_ENCAPTYPE_IP:
1298 break;
1299 }
1300
1301 l2tp_skb_set_owner_w(skb, sk);
1302
1303 l2tp_xmit_core(session, skb, fl, data_len);
1304out_unlock:
1305 bh_unlock_sock(sk);
1306
1307 return ret;
1308}
1309EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1310
1311
1312
1313
1314
1315
1316
1317
1318
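/* Tunnel socket destructor, run when the tunnel socket is destroyed.
 * Detach the tunnel from the socket, remove it from the per-net tunnel
 * list, close all its sessions and then call the socket's original
 * destructor.
 */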
1319static void l2tp_tunnel_destruct(struct sock *sk)
1320{
1321 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1322 struct l2tp_net *pn;
1323
1324 if (tunnel == NULL)
1325 goto end;
1326
1327 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1328
1329
1330
1331 switch (tunnel->encap) {
1332 case L2TP_ENCAPTYPE_UDP:
1333
1334 (udp_sk(sk))->encap_type = 0;
1335 (udp_sk(sk))->encap_rcv = NULL;
1336 (udp_sk(sk))->encap_destroy = NULL;
1337 break;
1338 case L2TP_ENCAPTYPE_IP:
1339 break;
1340 }
1341
1342
1343 sk->sk_destruct = tunnel->old_sk_destruct;
1344 sk->sk_user_data = NULL;
1345
1346
1347 pn = l2tp_pernet(tunnel->l2tp_net);
1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1349 list_del_rcu(&tunnel->list);
1350 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1351 atomic_dec(&l2tp_tunnel_count);
1352
1353 l2tp_tunnel_closeall(tunnel);
1354
1355 tunnel->sock = NULL;
1356 l2tp_tunnel_dec_refcount(tunnel);
1357
1358
1359 if (sk->sk_destruct)
1360 (*sk->sk_destruct)(sk);
1361end:
1362 return;
1363}
1364
1365
1366
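/* When the tunnel is closed, all the attached sessions need to go too. */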
1367void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1368{
1369 int hash;
1370 struct hlist_node *walk;
1371 struct hlist_node *tmp;
1372 struct l2tp_session *session;
1373
1374 BUG_ON(tunnel == NULL);
1375
1376 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1377 tunnel->name);
1378
1379 write_lock_bh(&tunnel->hlist_lock);
1380 tunnel->acpt_newsess = false;
1381 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1382again:
1383 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1384 session = hlist_entry(walk, struct l2tp_session, hlist);
1385
1386 l2tp_info(session, L2TP_MSG_CONTROL,
1387 "%s: closing session\n", session->name);
1388
1389 hlist_del_init(&session->hlist);
1390
1391 if (session->ref != NULL)
1392 (*session->ref)(session);
1393
1394 write_unlock_bh(&tunnel->hlist_lock);
1395
1396 __l2tp_session_unhash(session);
1397 l2tp_session_queue_purge(session);
1398
1399 if (session->session_close != NULL)
1400 (*session->session_close)(session);
1401
1402 if (session->deref != NULL)
1403 (*session->deref)(session);
1404
1405 l2tp_session_dec_refcount(session);
1406
1407 write_lock_bh(&tunnel->hlist_lock);
1408
1409
1410
1411
1412
1413
1414 goto again;
1415 }
1416 }
1417 write_unlock_bh(&tunnel->hlist_lock);
1418}
1419EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1420
1421
1422static void l2tp_udp_encap_destroy(struct sock *sk)
1423{
1424 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1425 if (tunnel) {
1426 l2tp_tunnel_closeall(tunnel);
1427 sock_put(sk);
1428 }
1429}
1430
1431
1432
1433
1434static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1435{
1436 BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1437 BUG_ON(tunnel->sock != NULL);
1438 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1439 kfree_rcu(tunnel, rcu);
1440}
1441
1442
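/* Workqueue tunnel deletion function: close all sessions, shut down the
 * tunnel socket (releasing it entirely if it was created by the kernel)
 * and drop the reference taken by l2tp_tunnel_delete().
 */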
1443static void l2tp_tunnel_del_work(struct work_struct *work)
1444{
1445 struct l2tp_tunnel *tunnel = NULL;
1446 struct socket *sock = NULL;
1447 struct sock *sk = NULL;
1448
1449 tunnel = container_of(work, struct l2tp_tunnel, del_work);
1450
1451 l2tp_tunnel_closeall(tunnel);
1452
1453 sk = l2tp_tunnel_sock_lookup(tunnel);
1454 if (!sk)
1455 goto out;
1456
1457 sock = sk->sk_socket;
1458
1459
1460
1461
1462
1463
1464
1465
1466 if (tunnel->fd >= 0) {
1467 if (sock)
1468 inet_shutdown(sock, 2);
1469 } else {
1470 if (sock)
1471 kernel_sock_shutdown(sock, SHUT_RDWR);
1472 sk_release_kernel(sk);
1473 }
1474
1475 l2tp_tunnel_sock_put(sk);
1476out:
1477 l2tp_tunnel_dec_refcount(tunnel);
1478}
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
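/* Create a kernel UDP or L2TP/IP socket for a tunnel when no userspace
 * socket fd was supplied, bind it to the local address and connect it to
 * the peer.  On failure, any partially set up socket is released again.
 */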
1489static int l2tp_tunnel_sock_create(struct net *net,
1490 u32 tunnel_id,
1491 u32 peer_tunnel_id,
1492 struct l2tp_tunnel_cfg *cfg,
1493 struct socket **sockp)
1494{
1495 int err = -EINVAL;
1496 struct socket *sock = NULL;
1497 struct sockaddr_in udp_addr = {0};
1498 struct sockaddr_l2tpip ip_addr = {0};
1499#if IS_ENABLED(CONFIG_IPV6)
1500 struct sockaddr_in6 udp6_addr = {0};
1501 struct sockaddr_l2tpip6 ip6_addr = {0};
1502#endif
1503
1504 switch (cfg->encap) {
1505 case L2TP_ENCAPTYPE_UDP:
1506#if IS_ENABLED(CONFIG_IPV6)
1507 if (cfg->local_ip6 && cfg->peer_ip6) {
1508 err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
1509 if (err < 0)
1510 goto out;
1511
1512 sk_change_net(sock->sk, net);
1513
1514 udp6_addr.sin6_family = AF_INET6;
1515 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1516 sizeof(udp6_addr.sin6_addr));
1517 udp6_addr.sin6_port = htons(cfg->local_udp_port);
1518 err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
1519 sizeof(udp6_addr));
1520 if (err < 0)
1521 goto out;
1522
1523 udp6_addr.sin6_family = AF_INET6;
1524 memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
1525 sizeof(udp6_addr.sin6_addr));
1526 udp6_addr.sin6_port = htons(cfg->peer_udp_port);
1527 err = kernel_connect(sock,
1528 (struct sockaddr *) &udp6_addr,
1529 sizeof(udp6_addr), 0);
1530 if (err < 0)
1531 goto out;
1532
1533 if (cfg->udp6_zero_tx_checksums)
1534 udp_set_no_check6_tx(sock->sk, true);
1535 if (cfg->udp6_zero_rx_checksums)
1536 udp_set_no_check6_rx(sock->sk, true);
1537 } else
1538#endif
1539 {
1540 err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
1541 if (err < 0)
1542 goto out;
1543
1544 sk_change_net(sock->sk, net);
1545
1546 udp_addr.sin_family = AF_INET;
1547 udp_addr.sin_addr = cfg->local_ip;
1548 udp_addr.sin_port = htons(cfg->local_udp_port);
1549 err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
1550 sizeof(udp_addr));
1551 if (err < 0)
1552 goto out;
1553
1554 udp_addr.sin_family = AF_INET;
1555 udp_addr.sin_addr = cfg->peer_ip;
1556 udp_addr.sin_port = htons(cfg->peer_udp_port);
1557 err = kernel_connect(sock,
1558 (struct sockaddr *) &udp_addr,
1559 sizeof(udp_addr), 0);
1560 if (err < 0)
1561 goto out;
1562 }
1563
1564 if (!cfg->use_udp_checksums)
1565 sock->sk->sk_no_check_tx = 1;
1566
1567 inet_inc_convert_csum(sock->sk);
1568
1569 break;
1570
1571 case L2TP_ENCAPTYPE_IP:
1572#if IS_ENABLED(CONFIG_IPV6)
1573 if (cfg->local_ip6 && cfg->peer_ip6) {
1574 err = sock_create_kern(AF_INET6, SOCK_DGRAM,
1575 IPPROTO_L2TP, &sock);
1576 if (err < 0)
1577 goto out;
1578
1579 sk_change_net(sock->sk, net);
1580
1581 ip6_addr.l2tp_family = AF_INET6;
1582 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1583 sizeof(ip6_addr.l2tp_addr));
1584 ip6_addr.l2tp_conn_id = tunnel_id;
1585 err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1586 sizeof(ip6_addr));
1587 if (err < 0)
1588 goto out;
1589
1590 ip6_addr.l2tp_family = AF_INET6;
1591 memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1592 sizeof(ip6_addr.l2tp_addr));
1593 ip6_addr.l2tp_conn_id = peer_tunnel_id;
1594 err = kernel_connect(sock,
1595 (struct sockaddr *) &ip6_addr,
1596 sizeof(ip6_addr), 0);
1597 if (err < 0)
1598 goto out;
1599 } else
1600#endif
1601 {
1602 err = sock_create_kern(AF_INET, SOCK_DGRAM,
1603 IPPROTO_L2TP, &sock);
1604 if (err < 0)
1605 goto out;
1606
1607 sk_change_net(sock->sk, net);
1608
1609 ip_addr.l2tp_family = AF_INET;
1610 ip_addr.l2tp_addr = cfg->local_ip;
1611 ip_addr.l2tp_conn_id = tunnel_id;
1612 err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1613 sizeof(ip_addr));
1614 if (err < 0)
1615 goto out;
1616
1617 ip_addr.l2tp_family = AF_INET;
1618 ip_addr.l2tp_addr = cfg->peer_ip;
1619 ip_addr.l2tp_conn_id = peer_tunnel_id;
1620 err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1621 sizeof(ip_addr), 0);
1622 if (err < 0)
1623 goto out;
1624 }
1625 break;
1626
1627 default:
1628 goto out;
1629 }
1630
1631out:
1632 *sockp = sock;
1633 if ((err < 0) && sock) {
1634 kernel_sock_shutdown(sock, SHUT_RDWR);
1635 sk_release_kernel(sock->sk);
1636 *sockp = NULL;
1637 }
1638
1639 return err;
1640}
1641
1642static struct lock_class_key l2tp_socket_class;
1643
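/* Create a new tunnel, either around a userspace-supplied socket fd or a
 * kernel socket created here.  For UDP encapsulation the socket is marked
 * as an L2TP encap socket, the tunnel takes over the socket's destructor,
 * and the tunnel is added to the per-net tunnel list.
 */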
1644int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1645{
1646 struct l2tp_tunnel *tunnel = NULL;
1647 int err;
1648 struct socket *sock = NULL;
1649 struct sock *sk = NULL;
1650 struct l2tp_net *pn;
1651 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1652
1653
1654
1655
1656
1657 if (fd < 0) {
1658 err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1659 cfg, &sock);
1660 if (err < 0)
1661 goto err;
1662 } else {
1663 sock = sockfd_lookup(fd, &err);
1664 if (!sock) {
1665 pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1666 tunnel_id, fd, err);
1667 err = -EBADF;
1668 goto err;
1669 }
1670
1671
1672 if (!net_eq(sock_net(sock->sk), net)) {
1673 pr_err("tunl %u: netns mismatch\n", tunnel_id);
1674 err = -EINVAL;
1675 goto err;
1676 }
1677 }
1678
1679 sk = sock->sk;
1680
1681 if (cfg != NULL)
1682 encap = cfg->encap;
1683
1684
1685 switch (encap) {
1686 case L2TP_ENCAPTYPE_UDP:
1687 err = -EPROTONOSUPPORT;
1688 if (sk->sk_protocol != IPPROTO_UDP) {
1689 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1690 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1691 goto err;
1692 }
1693 break;
1694 case L2TP_ENCAPTYPE_IP:
1695 err = -EPROTONOSUPPORT;
1696 if (sk->sk_protocol != IPPROTO_L2TP) {
1697 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1698 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1699 goto err;
1700 }
1701 break;
1702 }
1703
1704
1705 tunnel = l2tp_tunnel(sk);
1706 if (tunnel != NULL) {
1707
1708 err = -EBUSY;
1709 goto err;
1710 }
1711
1712 tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1713 if (tunnel == NULL) {
1714 err = -ENOMEM;
1715 goto err;
1716 }
1717
1718 tunnel->version = version;
1719 tunnel->tunnel_id = tunnel_id;
1720 tunnel->peer_tunnel_id = peer_tunnel_id;
1721 tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1722
1723 tunnel->magic = L2TP_TUNNEL_MAGIC;
1724 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1725 rwlock_init(&tunnel->hlist_lock);
1726 tunnel->acpt_newsess = true;
1727
1728
1729 tunnel->l2tp_net = net;
1730 pn = l2tp_pernet(net);
1731
1732 if (cfg != NULL)
1733 tunnel->debug = cfg->debug;
1734
1735#if IS_ENABLED(CONFIG_IPV6)
1736 if (sk->sk_family == PF_INET6) {
1737 struct ipv6_pinfo *np = inet6_sk(sk);
1738
1739 if (ipv6_addr_v4mapped(&np->saddr) &&
1740 ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
1741 struct inet_sock *inet = inet_sk(sk);
1742
1743 tunnel->v4mapped = true;
1744 inet->inet_saddr = np->saddr.s6_addr32[3];
1745 inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
1746 inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
1747 } else {
1748 tunnel->v4mapped = false;
1749 }
1750 }
1751#endif
1752
1753
1754 tunnel->encap = encap;
1755 if (encap == L2TP_ENCAPTYPE_UDP) {
1756
1757 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1758 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1759 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1760#if IS_ENABLED(CONFIG_IPV6)
1761 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1762 udpv6_encap_enable();
1763 else
1764#endif
1765 udp_encap_enable();
1766 }
1767
1768 sk->sk_user_data = tunnel;
1769
1770
1771
1772
1773 tunnel->old_sk_destruct = sk->sk_destruct;
1774 sk->sk_destruct = &l2tp_tunnel_destruct;
1775 tunnel->sock = sk;
1776 tunnel->fd = fd;
1777 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1778
1779 sk->sk_allocation = GFP_ATOMIC;
1780
1781
1782 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1783
1784
1785 INIT_LIST_HEAD(&tunnel->list);
1786 atomic_inc(&l2tp_tunnel_count);
1787
1788
1789
1790
1791 l2tp_tunnel_inc_refcount(tunnel);
1792 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1793 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1794 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1795
1796 err = 0;
1797err:
1798 if (tunnelp)
1799 *tunnelp = tunnel;
1800
1801
1802
1803
1804 if (sock && sock->file)
1805 sockfd_put(sock);
1806
1807 return err;
1808}
1809EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1810
1811
1812
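/* Schedule tunnel deletion on the work queue; only the first caller for a
 * given tunnel queues the work.
 */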
1813void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1814{
1815 if (!test_and_set_bit(0, &tunnel->dead)) {
1816 l2tp_tunnel_inc_refcount(tunnel);
1817 queue_work(l2tp_wq, &tunnel->del_work);
1818 }
1819}
1820EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1821
1822
1823
1824void l2tp_session_free(struct l2tp_session *session)
1825{
1826 struct l2tp_tunnel *tunnel = session->tunnel;
1827
1828 BUG_ON(atomic_read(&session->ref_count) != 0);
1829
1830 if (tunnel) {
1831 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1832 if (session->session_id != 0)
1833 atomic_dec(&l2tp_session_count);
1834 sock_put(tunnel->sock);
1835 session->tunnel = NULL;
1836 l2tp_tunnel_dec_refcount(tunnel);
1837 }
1838
1839 kfree(session);
1840
1841 return;
1842}
1843EXPORT_SYMBOL_GPL(l2tp_session_free);
1844
1845
1846
1847
1848
1849
1850void __l2tp_session_unhash(struct l2tp_session *session)
1851{
1852 struct l2tp_tunnel *tunnel = session->tunnel;
1853
1854
1855 if (tunnel) {
1856
1857 write_lock_bh(&tunnel->hlist_lock);
1858 hlist_del_init(&session->hlist);
1859 write_unlock_bh(&tunnel->hlist_lock);
1860
1861
1862 if (tunnel->version != L2TP_HDR_VER_2) {
1863 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1864 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1865 hlist_del_init_rcu(&session->global_hlist);
1866 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1867 synchronize_rcu();
1868 }
1869 }
1870}
1871EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1872
1873
1874
1875
1876int l2tp_session_delete(struct l2tp_session *session)
1877{
1878 if (session->ref)
1879 (*session->ref)(session);
1880 __l2tp_session_unhash(session);
1881 l2tp_session_queue_purge(session);
1882 if (session->session_close != NULL)
1883 (*session->session_close)(session);
1884 if (session->deref)
1885 (*session->deref)(session);
1886 l2tp_session_dec_refcount(session);
1887 return 0;
1888}
1889EXPORT_SYMBOL_GPL(l2tp_session_delete);
1890
1891
1892
1893
1894static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1895{
1896 if (version == L2TP_HDR_VER_2) {
1897 session->hdr_len = 6;
1898 if (session->send_seq)
1899 session->hdr_len += 4;
1900 } else {
1901 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1902 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1903 session->hdr_len += 4;
1904 }
1905
1906}
1907
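/* Allocate and initialise a new session (plus priv_size bytes of private
 * data for the pseudowire), apply the optional configuration and add it
 * to the tunnel.  Returns the new session or an ERR_PTR() on failure.
 */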
1908struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1909{
1910 struct l2tp_session *session;
1911 int err;
1912
1913 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1914 if (session != NULL) {
1915 session->magic = L2TP_SESSION_MAGIC;
1916 session->tunnel = tunnel;
1917
1918 session->session_id = session_id;
1919 session->peer_session_id = peer_session_id;
1920 session->nr = 0;
1921 if (tunnel->version == L2TP_HDR_VER_2)
1922 session->nr_max = 0xffff;
1923 else
1924 session->nr_max = 0xffffff;
1925 session->nr_window_size = session->nr_max / 2;
1926 session->nr_oos_count_max = 4;
1927
1928
1929 session->reorder_skip = 1;
1930
1931 sprintf(&session->name[0], "sess %u/%u",
1932 tunnel->tunnel_id, session->session_id);
1933
1934 skb_queue_head_init(&session->reorder_q);
1935
1936 INIT_HLIST_NODE(&session->hlist);
1937 INIT_HLIST_NODE(&session->global_hlist);
1938
1939
1940 session->debug = tunnel->debug;
1941
1942 if (cfg) {
1943 session->pwtype = cfg->pw_type;
1944 session->debug = cfg->debug;
1945 session->mtu = cfg->mtu;
1946 session->mru = cfg->mru;
1947 session->send_seq = cfg->send_seq;
1948 session->recv_seq = cfg->recv_seq;
1949 session->lns_mode = cfg->lns_mode;
1950 session->reorder_timeout = cfg->reorder_timeout;
1951 session->offset = cfg->offset;
1952 session->l2specific_type = cfg->l2specific_type;
1953 session->l2specific_len = cfg->l2specific_len;
1954 session->cookie_len = cfg->cookie_len;
1955 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1956 session->peer_cookie_len = cfg->peer_cookie_len;
1957 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1958 }
1959
1960 if (tunnel->version == L2TP_HDR_VER_2)
1961 session->build_header = l2tp_build_l2tpv2_header;
1962 else
1963 session->build_header = l2tp_build_l2tpv3_header;
1964
1965 l2tp_session_set_header_len(session, tunnel->version);
1966
1967 l2tp_session_inc_refcount(session);
1968
1969 err = l2tp_session_add_to_tunnel(tunnel, session);
1970 if (err) {
1971 kfree(session);
1972
1973 return ERR_PTR(err);
1974 }
1975
1976
1977 if (session->session_id != 0)
1978 atomic_inc(&l2tp_session_count);
1979
1980 return session;
1981 }
1982
1983 return ERR_PTR(-ENOMEM);
1984}
1985EXPORT_SYMBOL_GPL(l2tp_session_create);
1986
1987
1988
1989
1990
1991static __net_init int l2tp_init_net(struct net *net)
1992{
1993 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1994 int hash;
1995
1996 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1997 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1998
1999 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
2000 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
2001
2002 spin_lock_init(&pn->l2tp_session_hlist_lock);
2003
2004 return 0;
2005}
2006
2007static __net_exit void l2tp_exit_net(struct net *net)
2008{
2009 struct l2tp_net *pn = l2tp_pernet(net);
2010 struct l2tp_tunnel *tunnel = NULL;
2011
2012 rcu_read_lock_bh();
2013 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
 l2tp_tunnel_delete(tunnel);
2015 }
2016 rcu_read_unlock_bh();
2017
2018 flush_workqueue(l2tp_wq);
2019 rcu_barrier();
2020}
2021
2022static struct pernet_operations l2tp_net_ops = {
2023 .init = l2tp_init_net,
2024 .exit = l2tp_exit_net,
2025 .id = &l2tp_net_id,
2026 .size = sizeof(struct l2tp_net),
2027};
2028
2029static int __init l2tp_init(void)
2030{
2031 int rc = 0;
2032
2033 rc = register_pernet_device(&l2tp_net_ops);
2034 if (rc)
2035 goto out;
2036
2037 l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
2038 if (!l2tp_wq) {
2039 pr_err("alloc_workqueue failed\n");
2040 rc = -ENOMEM;
2041 goto out;
2042 }
2043
2044 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
2045
2046out:
2047 return rc;
2048}
2049
2050static void __exit l2tp_exit(void)
2051{
2052 unregister_pernet_device(&l2tp_net_ops);
2053 if (l2tp_wq) {
2054 destroy_workqueue(l2tp_wq);
2055 l2tp_wq = NULL;
2056 }
2057}
2058
2059module_init(l2tp_init);
2060module_exit(l2tp_exit);
2061
2062MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
2063MODULE_DESCRIPTION("L2TP core");
2064MODULE_LICENSE("GPL");
2065MODULE_VERSION(L2TP_DRV_VERSION);
2066
2067