// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Point-to-Point Tunneling Protocol for Linux
 *
 * Authors: Dmitry Kozlov <xeb@mail.ru>
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <net/sock.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
#include <net/pptp.h>

#include <linux/uaccess.h>

#define PPTP_DRIVER_VERSION "0.8.5"

#define MAX_CALLID 65535

static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
static struct pppox_sock __rcu **callid_sock;

static DEFINE_SPINLOCK(chan_lock);

static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;

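/* Look up the PPTP socket registered for local call ID @call_id and make
 * sure it is bound to the peer address @s_addr.  Runs in its own RCU
 * read-side section and takes a reference on the socket before returning
 * it, so callers must sock_put() it when done.
 */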
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;

	rcu_read_lock();
	sock = rcu_dereference(callid_sock[call_id]);
	if (sock) {
		opt = &sock->proto.pptp;
		if (opt->dst_addr.sin_addr.s_addr != s_addr)
			sock = NULL;
		else
			sock_hold(sk_pppox(sock));
	}
	rcu_read_unlock();

	return sock;
}

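/* Return true if some socket is already connected to the remote call ID
 * @call_id at address @d_addr, i.e. the destination is in use.
 */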
static int lookup_chan_dst(u16 call_id, __be32 d_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;
	int i;

	rcu_read_lock();
	i = 1;
	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
		sock = rcu_dereference(callid_sock[i]);
		if (!sock)
			continue;
		opt = &sock->proto.pptp;
		if (opt->dst_addr.call_id == call_id &&
		    opt->dst_addr.sin_addr.s_addr == d_addr)
			break;
	}
	rcu_read_unlock();

	return i < MAX_CALLID;
}

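/* Reserve a local call ID for @sock and publish the socket in callid_sock
 * so the receive path can find it.  If @sa->call_id is zero a free ID is
 * picked from callid_bitmap, otherwise the requested ID must still be
 * unused.  Returns 0 on success, -1 if no ID could be reserved.
 */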
static int add_chan(struct pppox_sock *sock,
		    struct pptp_addr *sa)
{
	static int call_id;

	spin_lock(&chan_lock);
	if (!sa->call_id) {
		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				goto out_err;
		}
		sa->call_id = call_id;
	} else if (test_bit(sa->call_id, callid_bitmap)) {
		goto out_err;
	}

	sock->proto.pptp.src_addr = *sa;
	set_bit(sa->call_id, callid_bitmap);
	rcu_assign_pointer(callid_sock[sa->call_id], sock);
	spin_unlock(&chan_lock);

	return 0;

out_err:
	spin_unlock(&chan_lock);
	return -1;
}

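/* Unpublish @sock from the call ID table.  The caller is responsible for
 * waiting out an RCU grace period (as pptp_release() does) before the
 * socket goes away.
 */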
static void del_chan(struct pppox_sock *sock)
{
	spin_lock(&chan_lock);
	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
	spin_unlock(&chan_lock);
}

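/* Build the IPv4 flow for the GRE tunnel described by @po and look up a
 * route to the peer.
 */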
static struct rtable *pptp_route_output(const struct pppox_sock *po,
					struct flowi4 *fl4)
{
	const struct sock *sk = &po->sk;
	struct net *net;

	net = sock_net(sk);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
			   RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
			   po->proto.pptp.dst_addr.sin_addr.s_addr,
			   po->proto.pptp.src_addr.sin_addr.s_addr,
			   0, 0, sock_net_uid(net, sk));
	security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));

	return ip_route_output_flow(net, fl4, sk);
}

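/* ppp_channel start_xmit hook: apply PPP address/control and protocol
 * field compression, prepend the GRE version 1 header (piggy-backing an
 * acknowledgement when one is outstanding) and the outer IPv4 header,
 * then hand the packet to ip_local_out().
 */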
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct net *net = sock_net(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len = sizeof(*hdr);
	struct flowi4 fl4;
	int islcp;
	int len;
	unsigned char *data;
	__u32 seq_recv;

	struct rtable *rt;
	struct net_device *tdev;
	struct iphdr *iph;
	int max_headroom;

	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
		goto tx_error;

	rt = pptp_route_output(po, &fl4);
	if (IS_ERR(rt))
		goto tx_error;

	tdev = rt->dst.dev;

	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	data = skb->data;
	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field */
	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
		skb_pull(skb, 1);

	/* Put in the address/control bytes if necessary */
	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
		data = skb_push(skb, 2);
		data[0] = PPP_ALLSTATIONS;
		data[1] = PPP_UI;
	}

	len = skb->len;

	seq_recv = opt->seq_recv;

	if (opt->ack_sent == seq_recv)
		header_len -= sizeof(hdr->ack);

	/* Push down and install GRE header */
	skb_push(skb, header_len);
	hdr = (struct pptp_gre_header *)(skb->data);

	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
	hdr->gre_hd.protocol = GRE_PROTO_PPP;
	hdr->call_id = htons(opt->dst_addr.call_id);

	hdr->seq = htonl(++opt->seq_sent);
	if (opt->ack_sent != seq_recv) {
		/* send ack with this message */
		hdr->gre_hd.flags |= GRE_ACK;
		hdr->ack = htonl(seq_recv);
		opt->ack_sent = seq_recv;
	}
	hdr->payload_len = htons(len);

	/* Push down and install the IP header. */

	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_GRE;
	iph->tos = 0;
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->tot_len = htons(skb->len);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	nf_reset_ct(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	ip_local_out(net, skb->sk, skb);
	return 1;

tx_error:
	kfree_skb(skb);
	return 1;
}

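/* Per-socket receive handler (sk_backlog_rcv): parse the GRE header,
 * update acknowledgement and sequence state, strip the encapsulation and
 * pass the PPP frame to ppp_input().  Out-of-sequence packets are
 * dropped, except for LCP echo requests/replies which are always let
 * through.
 */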
static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	int headersize, payload_len, seq;
	__u8 *payload;
	struct pptp_gre_header *header;

	if (!(sk->sk_state & PPPOX_CONNECTED)) {
		if (sock_queue_rcv_skb(sk, skb))
			goto drop;
		return NET_RX_SUCCESS;
	}

	header = (struct pptp_gre_header *)(skb->data);
	headersize = sizeof(*header);

	/* test if acknowledgement present */
	if (GRE_IS_ACK(header->gre_hd.flags)) {
		__u32 ack;

		if (!pskb_may_pull(skb, headersize))
			goto drop;
		header = (struct pptp_gre_header *)(skb->data);

		/* ack in different place if S = 0 */
		ack = GRE_IS_SEQ(header->gre_hd.flags) ? ntohl(header->ack) :
			ntohl(header->seq);
		if (ack > opt->ack_recv)
			opt->ack_recv = ack;
		/* also handle sequence number wrap-around */
		if (WRAPPED(ack, opt->ack_recv))
			opt->ack_recv = ack;
	} else {
		headersize -= sizeof(header->ack);
	}
	/* test if payload present */
	if (!GRE_IS_SEQ(header->gre_hd.flags))
		goto drop;

	payload_len = ntohs(header->payload_len);
	seq = ntohl(header->seq);

	/* check for incomplete packet (length smaller than expected) */
	if (!pskb_may_pull(skb, headersize + payload_len))
		goto drop;

	payload = skb->data + headersize;
	/* check for expected sequence number */
	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
				(PPP_PROTOCOL(payload) == PPP_LCP) &&
				((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
			goto allow_packet;
	} else {
		opt->seq_recv = seq;
allow_packet:
		skb_pull(skb, headersize);

		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
			/* chop off address/control */
			if (skb->len < 3)
				goto drop;
			skb_pull(skb, 2);
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb_set_network_header(skb, skb->head - skb->data);
		ppp_input(&po->chan, skb);

		return NET_RX_SUCCESS;
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

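/* GRE protocol handler: sanity-check the GRE version 1 header, look up
 * the destination socket by call ID and source address, and queue the
 * packet to it with sk_receive_skb().
 */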
static int pptp_rcv(struct sk_buff *skb)
{
	struct pppox_sock *po;
	struct pptp_gre_header *header;
	struct iphdr *iph;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	iph = ip_hdr(skb);

	header = (struct pptp_gre_header *)skb->data;

	if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
		GRE_IS_CSUM(header->gre_hd.flags) ||    /* flag CSUM should be clear */
		GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
		!GRE_IS_KEY(header->gre_hd.flags) ||    /* flag KEY should be set */
		(header->gre_hd.flags & GRE_FLAGS))     /* flag Recursion Ctrl should be clear */
		/* if invalid, discard this packet */
		goto drop;

	po = lookup_chan(ntohs(header->call_id), iph->saddr);
	if (po) {
		skb_dst_drop(skb);
		nf_reset_ct(skb);
		return sk_receive_skb(sk_pppox(po), skb, 0);
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

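/* Bind the socket to a local call ID by registering it in the call ID
 * table.
 */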
static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto out;
	}

	if (sk->sk_state & PPPOX_BOUND) {
		error = -EBUSY;
		goto out;
	}

	if (add_chan(po, &sp->sa_addr.pptp))
		error = -EBUSY;
	else
		sk->sk_state |= PPPOX_BOUND;

out:
	release_sock(sk);
	return error;
}

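/* Connect the socket to the peer's call ID and address, route the
 * tunnel and register the PPP channel.
 */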
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct rtable *rt;
	struct flowi4 fl4;
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
		return -EALREADY;

	lock_sock(sk);
	/* Check for already bound sockets */
	if (sk->sk_state & PPPOX_CONNECTED) {
		error = -EBUSY;
		goto end;
	}

	/* Check for already disconnected sockets, on attempts to disconnect */
	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto end;
	}

	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
		error = -EINVAL;
		goto end;
	}

	po->chan.private = sk;
	po->chan.ops = &pptp_chan_ops;

	rt = pptp_route_output(po, &fl4);
	if (IS_ERR(rt)) {
		error = -EHOSTUNREACH;
		goto end;
	}
	sk_setup_caps(sk, &rt->dst);

	po->chan.mtu = dst_mtu(&rt->dst);
	if (!po->chan.mtu)
		po->chan.mtu = PPP_MRU;
	po->chan.mtu -= PPTP_HEADER_OVERHEAD;

	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
	error = ppp_register_channel(&po->chan);
	if (error) {
		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
		goto end;
	}

	opt->dst_addr = sp->sa_addr.pptp;
	sk->sk_state |= PPPOX_CONNECTED;

 end:
	release_sock(sk);
	return error;
}

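/* Report the local PPTP address the socket is bound to. */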
static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
	int peer)
{
	int len = sizeof(struct sockaddr_pppox);
	struct sockaddr_pppox sp;

	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));

	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_PPTP;
	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;

	memcpy(uaddr, &sp, len);

	return len;
}

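/* Release the socket: remove it from the call ID table, wait for
 * concurrent RCU readers to finish, and unbind it from its PPP channel.
 */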
static int pptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct pppox_sock *po;
	int error = 0;

	if (!sk)
		return 0;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_DEAD)) {
		release_sock(sk);
		return -EBADF;
	}

	po = pppox_sk(sk);
	del_chan(po);
	synchronize_rcu();

	pppox_unbind_sock(sk);
	sk->sk_state = PPPOX_DEAD;

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return error;
}

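/* Socket destructor: make sure the call ID is freed and the receive
 * queue is purged before the socket memory is released.
 */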
static void pptp_sock_destruct(struct sock *sk)
{
	if (!(sk->sk_state & PPPOX_DEAD)) {
		del_chan(pppox_sk(sk));
		pppox_unbind_sock(sk);
	}
	skb_queue_purge(&sk->sk_receive_queue);
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}

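/* Create a PX_PROTO_PPTP socket and initialise its sequence and
 * acknowledgement counters.
 */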
static int pptp_create(struct net *net, struct socket *sock, int kern)
{
	int error = -ENOMEM;
	struct sock *sk;
	struct pppox_sock *po;
	struct pptp_opt *opt;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state = SS_UNCONNECTED;
	sock->ops = &pptp_ops;

	sk->sk_backlog_rcv = pptp_rcv_core;
	sk->sk_state = PPPOX_NONE;
	sk->sk_type = SOCK_STREAM;
	sk->sk_family = PF_PPPOX;
	sk->sk_protocol = PX_PROTO_PPTP;
	sk->sk_destruct = pptp_sock_destruct;

	po = pppox_sk(sk);
	opt = &po->proto.pptp;

	opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
	opt->ack_recv = 0; opt->ack_sent = 0xffffffff;

	error = 0;
out:
	return error;
}

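/* PPP channel ioctl handler: only getting and setting the PPP flags
 * (PPPIOCGFLAGS/PPPIOCSFLAGS) is supported here.
 */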
static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
	unsigned long arg)
{
	struct sock *sk = chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = opt->ppp_flags;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		opt->ppp_flags = val & ~SC_RCV_BITS;
		err = 0;
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct ppp_channel_ops pptp_chan_ops = {
	.start_xmit = pptp_xmit,
	.ioctl      = pptp_ppp_ioctl,
};

static struct proto pptp_sk_proto __read_mostly = {
	.name     = "PPTP",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

static const struct proto_ops pptp_ops = {
	.family     = AF_PPPOX,
	.owner      = THIS_MODULE,
	.release    = pptp_release,
	.bind       = pptp_bind,
	.connect    = pptp_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = pptp_getname,
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.sendmsg    = sock_no_sendmsg,
	.recvmsg    = sock_no_recvmsg,
	.mmap       = sock_no_mmap,
	.ioctl      = pppox_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = pppox_compat_ioctl,
#endif
};

static const struct pppox_proto pppox_pptp_proto = {
	.create = pptp_create,
	.owner  = THIS_MODULE,
};

static const struct gre_protocol gre_pptp_protocol = {
	.handler = pptp_rcv,
};

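/* Allocate the call ID lookup table and register the GRE protocol
 * handler, the PPTP socket protocol and the PPPoX protocol on module
 * load.
 */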
static int __init pptp_init_module(void)
{
	int err = 0;
	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");

	callid_sock = vzalloc(array_size(sizeof(void *), (MAX_CALLID + 1)));
	if (!callid_sock)
		return -ENOMEM;

	err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	if (err) {
		pr_err("PPTP: can't add gre protocol\n");
		goto out_mem_free;
	}

	err = proto_register(&pptp_sk_proto, 0);
	if (err) {
		pr_err("PPTP: can't register sk_proto\n");
		goto out_gre_del_protocol;
	}

	err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
	if (err) {
		pr_err("PPTP: can't register pppox_proto\n");
		goto out_unregister_sk_proto;
	}

	return 0;

out_unregister_sk_proto:
	proto_unregister(&pptp_sk_proto);
out_gre_del_protocol:
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
out_mem_free:
	vfree(callid_sock);

	return err;
}

static void __exit pptp_exit_module(void)
{
	unregister_pppox_proto(PX_PROTO_PPTP);
	proto_unregister(&pptp_sk_proto);
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	vfree(callid_sock);
}

module_init(pptp_init_module);
module_exit(pptp_exit_module);

MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);