// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h. Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/sched/signal.h>
24#include <linux/kmod.h>
25#include <linux/init.h>
26#include <linux/list.h>
27#include <linux/idr.h>
28#include <linux/netdevice.h>
29#include <linux/poll.h>
30#include <linux/ppp_defs.h>
31#include <linux/filter.h>
32#include <linux/ppp-ioctl.h>
33#include <linux/ppp_channel.h>
34#include <linux/ppp-comp.h>
35#include <linux/skbuff.h>
36#include <linux/rtnetlink.h>
37#include <linux/if_arp.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/spinlock.h>
41#include <linux/rwsem.h>
42#include <linux/stddef.h>
43#include <linux/device.h>
44#include <linux/mutex.h>
45#include <linux/slab.h>
46#include <linux/file.h>
47#include <asm/unaligned.h>
48#include <net/slhc_vj.h>
49#include <linux/atomic.h>
50#include <linux/refcount.h>
51
52#include <linux/nsproxy.h>
53#include <net/net_namespace.h>
54#include <net/netns/generic.h>
55
56#define PPP_VERSION "2.4.2"
57
58/*
59 * Network protocols we support.
60 */
61#define NP_IP 0 /* Internet Protocol V4 */
62#define NP_IPV6 1 /* Internet Protocol V6 */
63#define NP_IPX 2 /* IPX protocol */
64#define NP_AT 3 /* Appletalk protocol */
65#define NP_MPLS_UC 4 /* MPLS unicast */
66#define NP_MPLS_MC 5 /* MPLS multicast */
67#define NUM_NP 6 /* Number of NPs. */
68
69#define MPHDRLEN 6 /* multilink protocol header length */
70#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
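/*
 * MPHDRLEN is the 2-byte PPP_MP protocol field plus a 1-byte B/E flags
 * byte and a 3-byte sequence number; with 12-bit short sequence numbers
 * the flags and sequence share 2 bytes, giving MPHDRLEN_SSN = 4
 * (see the header construction in ppp_mp_explode()).
 */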
71
72/*
73 * An instance of /dev/ppp can be associated with either a ppp
74 * interface unit or a ppp channel. In both cases, file->private_data
75 * points to one of these.
76 */
77struct ppp_file {
78 enum {
79 INTERFACE=1, CHANNEL
80 } kind;
81 struct sk_buff_head xq; /* pppd transmit queue */
82 struct sk_buff_head rq; /* receive queue for pppd */
83 wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
84 refcount_t refcnt; /* # refs (incl /dev/ppp attached) */
85 int hdrlen; /* space to leave for headers */
86 int index; /* interface unit / channel number */
87 int dead; /* unit/channel has been shut down */
88};
89
90#define PF_TO_X(pf, X) container_of(pf, X, file)
91
92#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
93#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
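/*
 * A struct ppp_file is always embedded in either a struct ppp or a
 * struct channel, so container_of() recovers the enclosing object;
 * PF_TO_PPP() is only valid when pf->kind == INTERFACE and
 * PF_TO_CHANNEL() only when pf->kind == CHANNEL.
 */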
94
/*
 * Data structure to hold primary network stats for which
 * we want to use 64 bit storage. Other network stats
 * are stored in dev->stats of the ppp structure.
 */
100struct ppp_link_stats {
101 u64 rx_packets;
102 u64 tx_packets;
103 u64 rx_bytes;
104 u64 tx_bytes;
105};
106
107/*
108 * Data structure describing one ppp unit.
109 * A ppp unit corresponds to a ppp network interface device
110 * and represents a multilink bundle.
111 * It can have 0 or more ppp channels connected to it.
112 */
113struct ppp {
114 struct ppp_file file; /* stuff for read/write/poll 0 */
115 struct file *owner; /* file that owns this unit 48 */
116 struct list_head channels; /* list of attached channels 4c */
117 int n_channels; /* how many channels are attached 54 */
118 spinlock_t rlock; /* lock for receive side 58 */
119 spinlock_t wlock; /* lock for transmit side 5c */
120 int __percpu *xmit_recursion; /* xmit recursion detect */
121 int mru; /* max receive unit 60 */
122 unsigned int flags; /* control bits 64 */
123 unsigned int xstate; /* transmit state bits 68 */
124 unsigned int rstate; /* receive state bits 6c */
125 int debug; /* debug flags 70 */
126 struct slcompress *vj; /* state for VJ header compression */
127 enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
128 struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
129 struct compressor *xcomp; /* transmit packet compressor 8c */
130 void *xc_state; /* its internal state 90 */
131 struct compressor *rcomp; /* receive decompressor 94 */
132 void *rc_state; /* its internal state 98 */
133 unsigned long last_xmit; /* jiffies when last pkt sent 9c */
134 unsigned long last_recv; /* jiffies when last pkt rcvd a0 */
135 struct net_device *dev; /* network interface device a4 */
136 int closing; /* is device closing down? a8 */
137#ifdef CONFIG_PPP_MULTILINK
138 int nxchan; /* next channel to send something on */
139 u32 nxseq; /* next sequence number to send */
140 int mrru; /* MP: max reconst. receive unit */
141 u32 nextseq; /* MP: seq no of next packet */
142 u32 minseq; /* MP: min of most recent seqnos */
143 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
144#endif /* CONFIG_PPP_MULTILINK */
145#ifdef CONFIG_PPP_FILTER
146 struct bpf_prog *pass_filter; /* filter for packets to pass */
147 struct bpf_prog *active_filter; /* filter for pkts to reset idle */
148#endif /* CONFIG_PPP_FILTER */
149 struct net *ppp_net; /* the net we belong to */
150 struct ppp_link_stats stats64; /* 64 bit network stats */
151};
152
153/*
154 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
155 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
156 * SC_MUST_COMP
157 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
158 * Bits in xstate: SC_COMP_RUN
159 */
160#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
161 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
162 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
163
164/*
165 * Private data structure for each channel.
166 * This includes the data structure used for multilink.
167 */
168struct channel {
169 struct ppp_file file; /* stuff for read/write/poll */
170 struct list_head list; /* link in all/new_channels list */
171 struct ppp_channel *chan; /* public channel data structure */
172 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
173 spinlock_t downl; /* protects `chan', file.xq dequeue */
174 struct ppp *ppp; /* ppp unit we're connected to */
175 struct net *chan_net; /* the net channel belongs to */
176 struct list_head clist; /* link in list of channels per unit */
177 rwlock_t upl; /* protects `ppp' */
178#ifdef CONFIG_PPP_MULTILINK
179 u8 avail; /* flag used in multilink stuff */
180 u8 had_frag; /* >= 1 fragments have been sent */
181 u32 lastseq; /* MP: last sequence # received */
182 int speed; /* speed of the corresponding ppp channel*/
183#endif /* CONFIG_PPP_MULTILINK */
184};
185
186struct ppp_config {
187 struct file *file;
188 s32 unit;
189 bool ifname_is_set;
190};
191
/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */
200
201static DEFINE_MUTEX(ppp_mutex);
202static atomic_t ppp_unit_count = ATOMIC_INIT(0);
203static atomic_t channel_count = ATOMIC_INIT(0);
204
205/* per-net private data for this module */
206static unsigned int ppp_net_id __read_mostly;
207struct ppp_net {
208 /* units to ppp mapping */
209 struct idr units_idr;
210
211 /*
212 * all_ppp_mutex protects the units_idr mapping.
213 * It also ensures that finding a ppp unit in the units_idr
214 * map and updating its file.refcnt field is atomic.
215 */
216 struct mutex all_ppp_mutex;
217
218 /* channels */
219 struct list_head all_channels;
220 struct list_head new_channels;
221 int last_channel_index;
222
	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index. It also ensures that finding
	 * a channel and updating its file.refcnt field is atomic.
	 */
228 spinlock_t all_channels_lock;
229};
230
231/* Get the PPP protocol number from a skb */
232#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
233
234/* We limit the length of ppp->file.rq to this (arbitrary) value */
235#define PPP_MAX_RQLEN 32
236
237/*
238 * Maximum number of multilink fragments queued up.
239 * This has to be large enough to cope with the maximum latency of
240 * the slowest channel relative to the others. Strictly it should
241 * depend on the number of channels and their characteristics.
242 */
243#define PPP_MP_MAX_QLEN 128
244
245/* Multilink header bits. */
246#define B 0x80 /* this fragment begins a packet */
247#define E 0x40 /* this fragment ends a packet */
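/* A fragment carrying both B and E holds a complete, unfragmented packet. */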
248
249/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
250#define seq_before(a, b) ((s32)((a) - (b)) < 0)
251#define seq_after(a, b) ((s32)((a) - (b)) > 0)
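/*
 * The s32 cast makes these comparisons robust against 32-bit wraparound:
 * e.g. seq_before(0xffffffff, 0) is true because (s32)(0xffffffff - 0)
 * evaluates to -1.
 */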
252
253/* Prototypes. */
254static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
255 struct file *file, unsigned int cmd, unsigned long arg);
256static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
257static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
258static void ppp_push(struct ppp *ppp);
259static void ppp_channel_push(struct channel *pch);
260static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
261 struct channel *pch);
262static void ppp_receive_error(struct ppp *ppp);
263static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
264static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
265 struct sk_buff *skb);
266#ifdef CONFIG_PPP_MULTILINK
267static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
268 struct channel *pch);
269static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
270static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
271static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
272#endif /* CONFIG_PPP_MULTILINK */
273static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
274static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
275static void ppp_ccp_closed(struct ppp *ppp);
276static struct compressor *find_compressor(int type);
277static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
278static int ppp_create_interface(struct net *net, struct file *file, int *unit);
279static void init_ppp_file(struct ppp_file *pf, int kind);
280static void ppp_destroy_interface(struct ppp *ppp);
281static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
282static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
283static int ppp_connect_channel(struct channel *pch, int unit);
284static int ppp_disconnect_channel(struct channel *pch);
285static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr, int min);
static int unit_set(struct idr *p, void *ptr, int n);
288static void unit_put(struct idr *p, int n);
289static void *unit_find(struct idr *p, int n);
290static void ppp_setup(struct net_device *dev);
291
292static const struct net_device_ops ppp_netdev_ops;
293
294static struct class *ppp_class;
295
296/* per net-namespace data */
297static inline struct ppp_net *ppp_pernet(struct net *net)
298{
299 BUG_ON(!net);
300
301 return net_generic(net, ppp_net_id);
302}
303
304/* Translates a PPP protocol number to a NP index (NP == network protocol) */
305static inline int proto_to_npindex(int proto)
306{
307 switch (proto) {
308 case PPP_IP:
309 return NP_IP;
310 case PPP_IPV6:
311 return NP_IPV6;
312 case PPP_IPX:
313 return NP_IPX;
314 case PPP_AT:
315 return NP_AT;
316 case PPP_MPLS_UC:
317 return NP_MPLS_UC;
318 case PPP_MPLS_MC:
319 return NP_MPLS_MC;
320 }
321 return -EINVAL;
322}
323
324/* Translates an NP index into a PPP protocol number */
325static const int npindex_to_proto[NUM_NP] = {
326 PPP_IP,
327 PPP_IPV6,
328 PPP_IPX,
329 PPP_AT,
330 PPP_MPLS_UC,
331 PPP_MPLS_MC,
332};
333
334/* Translates an ethertype into an NP index */
335static inline int ethertype_to_npindex(int ethertype)
336{
337 switch (ethertype) {
338 case ETH_P_IP:
339 return NP_IP;
340 case ETH_P_IPV6:
341 return NP_IPV6;
342 case ETH_P_IPX:
343 return NP_IPX;
344 case ETH_P_PPPTALK:
345 case ETH_P_ATALK:
346 return NP_AT;
347 case ETH_P_MPLS_UC:
348 return NP_MPLS_UC;
349 case ETH_P_MPLS_MC:
350 return NP_MPLS_MC;
351 }
352 return -1;
353}
354
355/* Translates an NP index into an ethertype */
356static const int npindex_to_ethertype[NUM_NP] = {
357 ETH_P_IP,
358 ETH_P_IPV6,
359 ETH_P_IPX,
360 ETH_P_PPPTALK,
361 ETH_P_MPLS_UC,
362 ETH_P_MPLS_MC,
363};
364
365/*
366 * Locking shorthand.
367 */
368#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
369#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
370#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
371#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
372#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
373 ppp_recv_lock(ppp); } while (0)
374#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
375 ppp_xmit_unlock(ppp); } while (0)
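/*
 * Note that ppp_lock() nests the two spinlocks in the wlock -> rlock
 * order required by the lock ordering documented above, and ppp_unlock()
 * releases them in the reverse order.
 */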
376
377/*
378 * /dev/ppp device routines.
379 * The /dev/ppp device is used by pppd to control the ppp unit.
380 * It supports the read, write, ioctl and poll functions.
381 * Open instances of /dev/ppp can be in one of three states:
382 * unattached, attached to a ppp unit, or attached to a ppp channel.
383 */
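/*
 * Typical pppd usage (see ppp_unattached_ioctl() below): open /dev/ppp,
 * then either create a new unit with PPPIOCNEWUNIT, attach to an existing
 * unit with PPPIOCATTACH, or attach to a channel with PPPIOCATTCHAN;
 * after that, read() and write() move PPP frames between pppd and that
 * unit or channel.
 */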
384static int ppp_open(struct inode *inode, struct file *file)
385{
386 /*
387 * This could (should?) be enforced by the permissions on /dev/ppp.
388 */
389 if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
390 return -EPERM;
391 return 0;
392}
393
394static int ppp_release(struct inode *unused, struct file *file)
395{
396 struct ppp_file *pf = file->private_data;
397 struct ppp *ppp;
398
399 if (pf) {
400 file->private_data = NULL;
401 if (pf->kind == INTERFACE) {
402 ppp = PF_TO_PPP(pf);
403 rtnl_lock();
404 if (file == ppp->owner)
405 unregister_netdevice(ppp->dev);
406 rtnl_unlock();
407 }
408 if (refcount_dec_and_test(&pf->refcnt)) {
409 switch (pf->kind) {
410 case INTERFACE:
411 ppp_destroy_interface(PF_TO_PPP(pf));
412 break;
413 case CHANNEL:
414 ppp_destroy_channel(PF_TO_CHANNEL(pf));
415 break;
416 }
417 }
418 }
419 return 0;
420}
421
422static ssize_t ppp_read(struct file *file, char __user *buf,
423 size_t count, loff_t *ppos)
424{
425 struct ppp_file *pf = file->private_data;
426 DECLARE_WAITQUEUE(wait, current);
427 ssize_t ret;
428 struct sk_buff *skb = NULL;
429 struct iovec iov;
430 struct iov_iter to;
431
432 ret = count;
433
434 if (!pf)
435 return -ENXIO;
436 add_wait_queue(&pf->rwait, &wait);
437 for (;;) {
438 set_current_state(TASK_INTERRUPTIBLE);
439 skb = skb_dequeue(&pf->rq);
440 if (skb)
441 break;
442 ret = 0;
443 if (pf->dead)
444 break;
445 if (pf->kind == INTERFACE) {
446 /*
447 * Return 0 (EOF) on an interface that has no
448 * channels connected, unless it is looping
449 * network traffic (demand mode).
450 */
451 struct ppp *ppp = PF_TO_PPP(pf);
452
453 ppp_recv_lock(ppp);
454 if (ppp->n_channels == 0 &&
455 (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
456 ppp_recv_unlock(ppp);
457 break;
458 }
459 ppp_recv_unlock(ppp);
460 }
461 ret = -EAGAIN;
462 if (file->f_flags & O_NONBLOCK)
463 break;
464 ret = -ERESTARTSYS;
465 if (signal_pending(current))
466 break;
467 schedule();
468 }
469 set_current_state(TASK_RUNNING);
470 remove_wait_queue(&pf->rwait, &wait);
471
472 if (!skb)
473 goto out;
474
475 ret = -EOVERFLOW;
476 if (skb->len > count)
477 goto outf;
478 ret = -EFAULT;
479 iov.iov_base = buf;
480 iov.iov_len = count;
481 iov_iter_init(&to, READ, &iov, 1, count);
482 if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
483 goto outf;
484 ret = skb->len;
485
486 outf:
487 kfree_skb(skb);
488 out:
489 return ret;
490}
491
492static ssize_t ppp_write(struct file *file, const char __user *buf,
493 size_t count, loff_t *ppos)
494{
495 struct ppp_file *pf = file->private_data;
496 struct sk_buff *skb;
497 ssize_t ret;
498
499 if (!pf)
500 return -ENXIO;
501 ret = -ENOMEM;
502 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
503 if (!skb)
504 goto out;
505 skb_reserve(skb, pf->hdrlen);
506 ret = -EFAULT;
507 if (copy_from_user(skb_put(skb, count), buf, count)) {
508 kfree_skb(skb);
509 goto out;
510 }
511
512 switch (pf->kind) {
513 case INTERFACE:
514 ppp_xmit_process(PF_TO_PPP(pf), skb);
515 break;
516 case CHANNEL:
517 skb_queue_tail(&pf->xq, skb);
518 ppp_channel_push(PF_TO_CHANNEL(pf));
519 break;
520 }
521
522 ret = count;
523
524 out:
525 return ret;
526}
527
528/* No kernel lock - fine */
529static __poll_t ppp_poll(struct file *file, poll_table *wait)
530{
531 struct ppp_file *pf = file->private_data;
532 __poll_t mask;
533
534 if (!pf)
535 return 0;
536 poll_wait(file, &pf->rwait, wait);
537 mask = EPOLLOUT | EPOLLWRNORM;
538 if (skb_peek(&pf->rq))
539 mask |= EPOLLIN | EPOLLRDNORM;
540 if (pf->dead)
541 mask |= EPOLLHUP;
542 else if (pf->kind == INTERFACE) {
543 /* see comment in ppp_read */
544 struct ppp *ppp = PF_TO_PPP(pf);
545
546 ppp_recv_lock(ppp);
547 if (ppp->n_channels == 0 &&
548 (ppp->flags & SC_LOOP_TRAFFIC) == 0)
549 mask |= EPOLLIN | EPOLLRDNORM;
550 ppp_recv_unlock(ppp);
551 }
552
553 return mask;
554}
555
556#ifdef CONFIG_PPP_FILTER
557static int get_filter(void __user *arg, struct sock_filter **p)
558{
559 struct sock_fprog uprog;
560 struct sock_filter *code = NULL;
561 int len;
562
563 if (copy_from_user(&uprog, arg, sizeof(uprog)))
564 return -EFAULT;
565
566 if (!uprog.len) {
567 *p = NULL;
568 return 0;
569 }
570
571 len = uprog.len * sizeof(struct sock_filter);
572 code = memdup_user(uprog.filter, len);
573 if (IS_ERR(code))
574 return PTR_ERR(code);
575
576 *p = code;
577 return uprog.len;
578}
579#endif /* CONFIG_PPP_FILTER */
580
581static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
582{
583 struct ppp_file *pf;
584 struct ppp *ppp;
585 int err = -EFAULT, val, val2, i;
586 struct ppp_idle idle;
587 struct npioctl npi;
588 int unit, cflags;
589 struct slcompress *vj;
590 void __user *argp = (void __user *)arg;
591 int __user *p = argp;
592
593 mutex_lock(&ppp_mutex);
594
595 pf = file->private_data;
596 if (!pf) {
597 err = ppp_unattached_ioctl(current->nsproxy->net_ns,
598 pf, file, cmd, arg);
599 goto out;
600 }
601
602 if (cmd == PPPIOCDETACH) {
603 /*
604 * PPPIOCDETACH is no longer supported as it was heavily broken,
605 * and is only known to have been used by pppd older than
606 * ppp-2.4.2 (released November 2003).
607 */
608 pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
609 current->comm, current->pid);
610 err = -EINVAL;
611 goto out;
612 }
613
614 if (pf->kind == CHANNEL) {
615 struct channel *pch;
616 struct ppp_channel *chan;
617
618 pch = PF_TO_CHANNEL(pf);
619
620 switch (cmd) {
621 case PPPIOCCONNECT:
622 if (get_user(unit, p))
623 break;
624 err = ppp_connect_channel(pch, unit);
625 break;
626
627 case PPPIOCDISCONN:
628 err = ppp_disconnect_channel(pch);
629 break;
630
631 default:
632 down_read(&pch->chan_sem);
633 chan = pch->chan;
634 err = -ENOTTY;
635 if (chan && chan->ops->ioctl)
636 err = chan->ops->ioctl(chan, cmd, arg);
637 up_read(&pch->chan_sem);
638 }
639 goto out;
640 }
641
642 if (pf->kind != INTERFACE) {
643 /* can't happen */
644 pr_err("PPP: not interface or channel??\n");
645 err = -EINVAL;
646 goto out;
647 }
648
649 ppp = PF_TO_PPP(pf);
650 switch (cmd) {
651 case PPPIOCSMRU:
652 if (get_user(val, p))
653 break;
654 ppp->mru = val;
655 err = 0;
656 break;
657
658 case PPPIOCSFLAGS:
659 if (get_user(val, p))
660 break;
661 ppp_lock(ppp);
662 cflags = ppp->flags & ~val;
663#ifdef CONFIG_PPP_MULTILINK
664 if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
665 ppp->nextseq = 0;
666#endif
667 ppp->flags = val & SC_FLAG_BITS;
668 ppp_unlock(ppp);
669 if (cflags & SC_CCP_OPEN)
670 ppp_ccp_closed(ppp);
671 err = 0;
672 break;
673
674 case PPPIOCGFLAGS:
675 val = ppp->flags | ppp->xstate | ppp->rstate;
676 if (put_user(val, p))
677 break;
678 err = 0;
679 break;
680
681 case PPPIOCSCOMPRESS:
682 err = ppp_set_compress(ppp, arg);
683 break;
684
685 case PPPIOCGUNIT:
686 if (put_user(ppp->file.index, p))
687 break;
688 err = 0;
689 break;
690
691 case PPPIOCSDEBUG:
692 if (get_user(val, p))
693 break;
694 ppp->debug = val;
695 err = 0;
696 break;
697
698 case PPPIOCGDEBUG:
699 if (put_user(ppp->debug, p))
700 break;
701 err = 0;
702 break;
703
704 case PPPIOCGIDLE:
705 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
706 idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
707 if (copy_to_user(argp, &idle, sizeof(idle)))
708 break;
709 err = 0;
710 break;
711
712 case PPPIOCSMAXCID:
713 if (get_user(val, p))
714 break;
715 val2 = 15;
716 if ((val >> 16) != 0) {
717 val2 = val >> 16;
718 val &= 0xffff;
719 }
720 vj = slhc_init(val2+1, val+1);
721 if (IS_ERR(vj)) {
722 err = PTR_ERR(vj);
723 break;
724 }
725 ppp_lock(ppp);
726 if (ppp->vj)
727 slhc_free(ppp->vj);
728 ppp->vj = vj;
729 ppp_unlock(ppp);
730 err = 0;
731 break;
732
733 case PPPIOCGNPMODE:
734 case PPPIOCSNPMODE:
735 if (copy_from_user(&npi, argp, sizeof(npi)))
736 break;
737 err = proto_to_npindex(npi.protocol);
738 if (err < 0)
739 break;
740 i = err;
741 if (cmd == PPPIOCGNPMODE) {
742 err = -EFAULT;
743 npi.mode = ppp->npmode[i];
744 if (copy_to_user(argp, &npi, sizeof(npi)))
745 break;
746 } else {
747 ppp->npmode[i] = npi.mode;
748 /* we may be able to transmit more packets now (??) */
749 netif_wake_queue(ppp->dev);
750 }
751 err = 0;
752 break;
753
754#ifdef CONFIG_PPP_FILTER
755 case PPPIOCSPASS:
756 {
757 struct sock_filter *code;
758
759 err = get_filter(argp, &code);
760 if (err >= 0) {
761 struct bpf_prog *pass_filter = NULL;
762 struct sock_fprog_kern fprog = {
763 .len = err,
764 .filter = code,
765 };
766
767 err = 0;
768 if (fprog.filter)
769 err = bpf_prog_create(&pass_filter, &fprog);
770 if (!err) {
771 ppp_lock(ppp);
772 if (ppp->pass_filter)
773 bpf_prog_destroy(ppp->pass_filter);
774 ppp->pass_filter = pass_filter;
775 ppp_unlock(ppp);
776 }
777 kfree(code);
778 }
779 break;
780 }
781 case PPPIOCSACTIVE:
782 {
783 struct sock_filter *code;
784
785 err = get_filter(argp, &code);
786 if (err >= 0) {
787 struct bpf_prog *active_filter = NULL;
788 struct sock_fprog_kern fprog = {
789 .len = err,
790 .filter = code,
791 };
792
793 err = 0;
794 if (fprog.filter)
795 err = bpf_prog_create(&active_filter, &fprog);
796 if (!err) {
797 ppp_lock(ppp);
798 if (ppp->active_filter)
799 bpf_prog_destroy(ppp->active_filter);
800 ppp->active_filter = active_filter;
801 ppp_unlock(ppp);
802 }
803 kfree(code);
804 }
805 break;
806 }
807#endif /* CONFIG_PPP_FILTER */
808
809#ifdef CONFIG_PPP_MULTILINK
810 case PPPIOCSMRRU:
811 if (get_user(val, p))
812 break;
813 ppp_recv_lock(ppp);
814 ppp->mrru = val;
815 ppp_recv_unlock(ppp);
816 err = 0;
817 break;
818#endif /* CONFIG_PPP_MULTILINK */
819
820 default:
821 err = -ENOTTY;
822 }
823
824out:
825 mutex_unlock(&ppp_mutex);
826
827 return err;
828}
829
830static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
831 struct file *file, unsigned int cmd, unsigned long arg)
832{
833 int unit, err = -EFAULT;
834 struct ppp *ppp;
835 struct channel *chan;
836 struct ppp_net *pn;
837 int __user *p = (int __user *)arg;
838
839 switch (cmd) {
840 case PPPIOCNEWUNIT:
841 /* Create a new ppp unit */
842 if (get_user(unit, p))
843 break;
844 err = ppp_create_interface(net, file, &unit);
845 if (err < 0)
846 break;
847
848 err = -EFAULT;
849 if (put_user(unit, p))
850 break;
851 err = 0;
852 break;
853
854 case PPPIOCATTACH:
855 /* Attach to an existing ppp unit */
856 if (get_user(unit, p))
857 break;
858 err = -ENXIO;
859 pn = ppp_pernet(net);
860 mutex_lock(&pn->all_ppp_mutex);
861 ppp = ppp_find_unit(pn, unit);
862 if (ppp) {
863 refcount_inc(&ppp->file.refcnt);
864 file->private_data = &ppp->file;
865 err = 0;
866 }
867 mutex_unlock(&pn->all_ppp_mutex);
868 break;
869
870 case PPPIOCATTCHAN:
871 if (get_user(unit, p))
872 break;
873 err = -ENXIO;
874 pn = ppp_pernet(net);
875 spin_lock_bh(&pn->all_channels_lock);
876 chan = ppp_find_channel(pn, unit);
877 if (chan) {
878 refcount_inc(&chan->file.refcnt);
879 file->private_data = &chan->file;
880 err = 0;
881 }
882 spin_unlock_bh(&pn->all_channels_lock);
883 break;
884
885 default:
886 err = -ENOTTY;
887 }
888
889 return err;
890}
891
892static const struct file_operations ppp_device_fops = {
893 .owner = THIS_MODULE,
894 .read = ppp_read,
895 .write = ppp_write,
896 .poll = ppp_poll,
897 .unlocked_ioctl = ppp_ioctl,
898 .open = ppp_open,
899 .release = ppp_release,
900 .llseek = noop_llseek,
901};
902
903static __net_init int ppp_init_net(struct net *net)
904{
905 struct ppp_net *pn = net_generic(net, ppp_net_id);
906
907 idr_init(&pn->units_idr);
908 mutex_init(&pn->all_ppp_mutex);
909
910 INIT_LIST_HEAD(&pn->all_channels);
911 INIT_LIST_HEAD(&pn->new_channels);
912
913 spin_lock_init(&pn->all_channels_lock);
914
915 return 0;
916}
917
918static __net_exit void ppp_exit_net(struct net *net)
919{
920 struct ppp_net *pn = net_generic(net, ppp_net_id);
921 struct net_device *dev;
922 struct net_device *aux;
923 struct ppp *ppp;
924 LIST_HEAD(list);
925 int id;
926
927 rtnl_lock();
928 for_each_netdev_safe(net, dev, aux) {
929 if (dev->netdev_ops == &ppp_netdev_ops)
930 unregister_netdevice_queue(dev, &list);
931 }
932
933 idr_for_each_entry(&pn->units_idr, ppp, id)
934 /* Skip devices already unregistered by previous loop */
935 if (!net_eq(dev_net(ppp->dev), net))
936 unregister_netdevice_queue(ppp->dev, &list);
937
938 unregister_netdevice_many(&list);
939 rtnl_unlock();
940
941 mutex_destroy(&pn->all_ppp_mutex);
942 idr_destroy(&pn->units_idr);
943 WARN_ON_ONCE(!list_empty(&pn->all_channels));
944 WARN_ON_ONCE(!list_empty(&pn->new_channels));
945}
946
947static struct pernet_operations ppp_net_ops = {
948 .init = ppp_init_net,
949 .exit = ppp_exit_net,
950 .id = &ppp_net_id,
951 .size = sizeof(struct ppp_net),
952};
953
954static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
955{
956 struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
957 int ret;
958
959 mutex_lock(&pn->all_ppp_mutex);
960
	if (unit < 0) {
		ret = unit_get(&pn->units_idr, ppp, 0);
		if (ret < 0)
			goto err;
		if (!ifname_is_set) {
			while (1) {
				snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
				if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
					break;
				unit_put(&pn->units_idr, ret);
				ret = unit_get(&pn->units_idr, ppp, ret + 1);
				if (ret < 0)
					goto err;
			}
		}
	} else {
977 /* Caller asked for a specific unit number. Fail with -EEXIST
978 * if unavailable. For backward compatibility, return -EEXIST
979 * too if idr allocation fails; this makes pppd retry without
980 * requesting a specific unit number.
981 */
982 if (unit_find(&pn->units_idr, unit)) {
983 ret = -EEXIST;
984 goto err;
985 }
986 ret = unit_set(&pn->units_idr, ppp, unit);
987 if (ret < 0) {
988 /* Rewrite error for backward compatibility */
989 ret = -EEXIST;
990 goto err;
991 }
992 }
993 ppp->file.index = ret;
994
995 if (!ifname_is_set)
996 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
997
998 mutex_unlock(&pn->all_ppp_mutex);
999
1000 ret = register_netdevice(ppp->dev);
1001 if (ret < 0)
1002 goto err_unit;
1003
1004 atomic_inc(&ppp_unit_count);
1005
1006 return 0;
1007
1008err_unit:
1009 mutex_lock(&pn->all_ppp_mutex);
1010 unit_put(&pn->units_idr, ppp->file.index);
1011err:
1012 mutex_unlock(&pn->all_ppp_mutex);
1013
1014 return ret;
1015}
1016
1017static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1018 const struct ppp_config *conf)
1019{
1020 struct ppp *ppp = netdev_priv(dev);
1021 int indx;
1022 int err;
1023 int cpu;
1024
1025 ppp->dev = dev;
1026 ppp->ppp_net = src_net;
1027 ppp->mru = PPP_MRU;
1028 ppp->owner = conf->file;
1029
1030 init_ppp_file(&ppp->file, INTERFACE);
1031 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
1032
1033 for (indx = 0; indx < NUM_NP; ++indx)
1034 ppp->npmode[indx] = NPMODE_PASS;
1035 INIT_LIST_HEAD(&ppp->channels);
1036 spin_lock_init(&ppp->rlock);
1037 spin_lock_init(&ppp->wlock);
1038
1039 ppp->xmit_recursion = alloc_percpu(int);
1040 if (!ppp->xmit_recursion) {
1041 err = -ENOMEM;
1042 goto err1;
1043 }
1044 for_each_possible_cpu(cpu)
1045 (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
1046
1047#ifdef CONFIG_PPP_MULTILINK
1048 ppp->minseq = -1;
1049 skb_queue_head_init(&ppp->mrq);
1050#endif /* CONFIG_PPP_MULTILINK */
1051#ifdef CONFIG_PPP_FILTER
1052 ppp->pass_filter = NULL;
1053 ppp->active_filter = NULL;
1054#endif /* CONFIG_PPP_FILTER */
1055
1056 err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
1057 if (err < 0)
1058 goto err2;
1059
1060 conf->file->private_data = &ppp->file;
1061
1062 return 0;
1063err2:
1064 free_percpu(ppp->xmit_recursion);
1065err1:
1066 return err;
1067}
1068
1069static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
1070 [IFLA_PPP_DEV_FD] = { .type = NLA_S32 },
1071};
1072
1073static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
1074 struct netlink_ext_ack *extack)
1075{
1076 if (!data)
1077 return -EINVAL;
1078
1079 if (!data[IFLA_PPP_DEV_FD])
1080 return -EINVAL;
1081 if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
1082 return -EBADF;
1083
1084 return 0;
1085}
1086
1087static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
1088 struct nlattr *tb[], struct nlattr *data[],
1089 struct netlink_ext_ack *extack)
1090{
1091 struct ppp_config conf = {
1092 .unit = -1,
1093 .ifname_is_set = true,
1094 };
1095 struct file *file;
1096 int err;
1097
1098 file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
1099 if (!file)
1100 return -EBADF;
1101
1102 /* rtnl_lock is already held here, but ppp_create_interface() locks
1103 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
1104 * possible deadlock due to lock order inversion, at the cost of
1105 * pushing the problem back to userspace.
1106 */
1107 if (!mutex_trylock(&ppp_mutex)) {
1108 err = -EBUSY;
1109 goto out;
1110 }
1111
1112 if (file->f_op != &ppp_device_fops || file->private_data) {
1113 err = -EBADF;
1114 goto out_unlock;
1115 }
1116
1117 conf.file = file;
1118
	/* Don't use device name generated by the rtnetlink layer when ifname
	 * isn't specified. Let ppp_dev_configure() set the device name using
	 * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
	 * userspace to infer the device name using the PPPIOCGUNIT ioctl.
	 */
	if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
		conf.ifname_is_set = false;
1126
1127 err = ppp_dev_configure(src_net, dev, &conf);
1128
1129out_unlock:
1130 mutex_unlock(&ppp_mutex);
1131out:
1132 fput(file);
1133
1134 return err;
1135}
1136
1137static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
1138{
1139 unregister_netdevice_queue(dev, head);
1140}
1141
1142static size_t ppp_nl_get_size(const struct net_device *dev)
1143{
1144 return 0;
1145}
1146
1147static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
1148{
1149 return 0;
1150}
1151
1152static struct net *ppp_nl_get_link_net(const struct net_device *dev)
1153{
1154 struct ppp *ppp = netdev_priv(dev);
1155
1156 return ppp->ppp_net;
1157}
1158
1159static struct rtnl_link_ops ppp_link_ops __read_mostly = {
1160 .kind = "ppp",
1161 .maxtype = IFLA_PPP_MAX,
1162 .policy = ppp_nl_policy,
1163 .priv_size = sizeof(struct ppp),
1164 .setup = ppp_setup,
1165 .validate = ppp_nl_validate,
1166 .newlink = ppp_nl_newlink,
1167 .dellink = ppp_nl_dellink,
1168 .get_size = ppp_nl_get_size,
1169 .fill_info = ppp_nl_fill_info,
1170 .get_link_net = ppp_nl_get_link_net,
1171};
1172
1173#define PPP_MAJOR 108
1174
1175/* Called at boot time if ppp is compiled into the kernel,
1176 or at module load time (from init_module) if compiled as a module. */
1177static int __init ppp_init(void)
1178{
1179 int err;
1180
1181 pr_info("PPP generic driver version " PPP_VERSION "\n");
1182
1183 err = register_pernet_device(&ppp_net_ops);
1184 if (err) {
1185 pr_err("failed to register PPP pernet device (%d)\n", err);
1186 goto out;
1187 }
1188
1189 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
1190 if (err) {
1191 pr_err("failed to register PPP device (%d)\n", err);
1192 goto out_net;
1193 }
1194
1195 ppp_class = class_create(THIS_MODULE, "ppp");
1196 if (IS_ERR(ppp_class)) {
1197 err = PTR_ERR(ppp_class);
1198 goto out_chrdev;
1199 }
1200
1201 err = rtnl_link_register(&ppp_link_ops);
1202 if (err) {
1203 pr_err("failed to register rtnetlink PPP handler\n");
1204 goto out_class;
1205 }
1206
1207 /* not a big deal if we fail here :-) */
1208 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
1209
1210 return 0;
1211
1212out_class:
1213 class_destroy(ppp_class);
1214out_chrdev:
1215 unregister_chrdev(PPP_MAJOR, "ppp");
1216out_net:
1217 unregister_pernet_device(&ppp_net_ops);
1218out:
1219 return err;
1220}
1221
1222/*
1223 * Network interface unit routines.
1224 */
1225static netdev_tx_t
1226ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1227{
1228 struct ppp *ppp = netdev_priv(dev);
1229 int npi, proto;
1230 unsigned char *pp;
1231
1232 npi = ethertype_to_npindex(ntohs(skb->protocol));
1233 if (npi < 0)
1234 goto outf;
1235
1236 /* Drop, accept or reject the packet */
1237 switch (ppp->npmode[npi]) {
1238 case NPMODE_PASS:
1239 break;
1240 case NPMODE_QUEUE:
1241 /* it would be nice to have a way to tell the network
1242 system to queue this one up for later. */
1243 goto outf;
1244 case NPMODE_DROP:
1245 case NPMODE_ERROR:
1246 goto outf;
1247 }
1248
1249 /* Put the 2-byte PPP protocol number on the front,
1250 making sure there is room for the address and control fields. */
1251 if (skb_cow_head(skb, PPP_HDRLEN))
1252 goto outf;
1253
1254 pp = skb_push(skb, 2);
1255 proto = npindex_to_proto[npi];
1256 put_unaligned_be16(proto, pp);
1257
1258 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
1259 ppp_xmit_process(ppp, skb);
1260
1261 return NETDEV_TX_OK;
1262
1263 outf:
1264 kfree_skb(skb);
1265 ++dev->stats.tx_dropped;
1266 return NETDEV_TX_OK;
1267}
1268
1269static int
1270ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1271{
1272 struct ppp *ppp = netdev_priv(dev);
1273 int err = -EFAULT;
1274 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
1275 struct ppp_stats stats;
1276 struct ppp_comp_stats cstats;
1277 char *vers;
1278
1279 switch (cmd) {
1280 case SIOCGPPPSTATS:
1281 ppp_get_stats(ppp, &stats);
1282 if (copy_to_user(addr, &stats, sizeof(stats)))
1283 break;
1284 err = 0;
1285 break;
1286
1287 case SIOCGPPPCSTATS:
1288 memset(&cstats, 0, sizeof(cstats));
1289 if (ppp->xc_state)
1290 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
1291 if (ppp->rc_state)
1292 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
1293 if (copy_to_user(addr, &cstats, sizeof(cstats)))
1294 break;
1295 err = 0;
1296 break;
1297
1298 case SIOCGPPPVER:
1299 vers = PPP_VERSION;
1300 if (copy_to_user(addr, vers, strlen(vers) + 1))
1301 break;
1302 err = 0;
1303 break;
1304
1305 default:
1306 err = -EINVAL;
1307 }
1308
1309 return err;
1310}
1311
1312static void
1313ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1314{
1315 struct ppp *ppp = netdev_priv(dev);
1316
1317 ppp_recv_lock(ppp);
1318 stats64->rx_packets = ppp->stats64.rx_packets;
1319 stats64->rx_bytes = ppp->stats64.rx_bytes;
1320 ppp_recv_unlock(ppp);
1321
1322 ppp_xmit_lock(ppp);
1323 stats64->tx_packets = ppp->stats64.tx_packets;
1324 stats64->tx_bytes = ppp->stats64.tx_bytes;
1325 ppp_xmit_unlock(ppp);
1326
1327 stats64->rx_errors = dev->stats.rx_errors;
1328 stats64->tx_errors = dev->stats.tx_errors;
1329 stats64->rx_dropped = dev->stats.rx_dropped;
1330 stats64->tx_dropped = dev->stats.tx_dropped;
1331 stats64->rx_length_errors = dev->stats.rx_length_errors;
1332}
1333
1334static int ppp_dev_init(struct net_device *dev)
1335{
1336 struct ppp *ppp;
1337
	ppp = netdev_priv(dev);
1339 /* Let the netdevice take a reference on the ppp file. This ensures
1340 * that ppp_destroy_interface() won't run before the device gets
1341 * unregistered.
1342 */
1343 refcount_inc(&ppp->file.refcnt);
1344
1345 return 0;
1346}
1347
1348static void ppp_dev_uninit(struct net_device *dev)
1349{
1350 struct ppp *ppp = netdev_priv(dev);
1351 struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1352
1353 ppp_lock(ppp);
1354 ppp->closing = 1;
1355 ppp_unlock(ppp);
1356
1357 mutex_lock(&pn->all_ppp_mutex);
1358 unit_put(&pn->units_idr, ppp->file.index);
1359 mutex_unlock(&pn->all_ppp_mutex);
1360
1361 ppp->owner = NULL;
1362
1363 ppp->file.dead = 1;
1364 wake_up_interruptible(&ppp->file.rwait);
1365}
1366
1367static void ppp_dev_priv_destructor(struct net_device *dev)
1368{
1369 struct ppp *ppp;
1370
1371 ppp = netdev_priv(dev);
1372 if (refcount_dec_and_test(&ppp->file.refcnt))
1373 ppp_destroy_interface(ppp);
1374}
1375
1376static const struct net_device_ops ppp_netdev_ops = {
1377 .ndo_init = ppp_dev_init,
1378 .ndo_uninit = ppp_dev_uninit,
1379 .ndo_start_xmit = ppp_start_xmit,
1380 .ndo_do_ioctl = ppp_net_ioctl,
1381 .ndo_get_stats64 = ppp_get_stats64,
1382};
1383
1384static struct device_type ppp_type = {
1385 .name = "ppp",
1386};
1387
1388static void ppp_setup(struct net_device *dev)
1389{
1390 dev->netdev_ops = &ppp_netdev_ops;
1391 SET_NETDEV_DEVTYPE(dev, &ppp_type);
1392
1393 dev->features |= NETIF_F_LLTX;
1394
1395 dev->hard_header_len = PPP_HDRLEN;
1396 dev->mtu = PPP_MRU;
1397 dev->addr_len = 0;
1398 dev->tx_queue_len = 3;
1399 dev->type = ARPHRD_PPP;
1400 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1401 dev->priv_destructor = ppp_dev_priv_destructor;
1402 netif_keep_dst(dev);
1403}
1404
1405/*
1406 * Transmit-side routines.
1407 */
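/*
 * Frames enter the transmit path either from the network core via
 * ppp_start_xmit() or from pppd via ppp_write(). ppp_xmit_process()
 * queues them on file.xq, ppp_send_frame() applies filtering and
 * VJ/CCP compression, and ppp_push() (or ppp_mp_explode() for a
 * multilink bundle) hands the result to the attached channel(s).
 */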
1408
1409/* Called to do any work queued up on the transmit side that can now be done */
1410static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1411{
1412 ppp_xmit_lock(ppp);
1413 if (!ppp->closing) {
1414 ppp_push(ppp);
1415
1416 if (skb)
1417 skb_queue_tail(&ppp->file.xq, skb);
1418 while (!ppp->xmit_pending &&
1419 (skb = skb_dequeue(&ppp->file.xq)))
1420 ppp_send_frame(ppp, skb);
1421 /* If there's no work left to do, tell the core net
1422 code that we can accept some more. */
1423 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1424 netif_wake_queue(ppp->dev);
1425 else
1426 netif_stop_queue(ppp->dev);
	} else {
		kfree_skb(skb);
	}
1430 ppp_xmit_unlock(ppp);
1431}
1432
1433static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1434{
1435 local_bh_disable();
1436
1437 if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
1438 goto err;
1439
1440 (*this_cpu_ptr(ppp->xmit_recursion))++;
1441 __ppp_xmit_process(ppp, skb);
1442 (*this_cpu_ptr(ppp->xmit_recursion))--;
1443
1444 local_bh_enable();
1445
1446 return;
1447
1448err:
1449 local_bh_enable();
1450
1451 kfree_skb(skb);
1452
1453 if (net_ratelimit())
1454 netdev_err(ppp->dev, "recursion detected\n");
1455}
1456
1457static inline struct sk_buff *
1458pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1459{
1460 struct sk_buff *new_skb;
1461 int len;
1462 int new_skb_size = ppp->dev->mtu +
1463 ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1464 int compressor_skb_size = ppp->dev->mtu +
1465 ppp->xcomp->comp_extra + PPP_HDRLEN;
1466 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1467 if (!new_skb) {
1468 if (net_ratelimit())
1469 netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1470 return NULL;
1471 }
1472 if (ppp->dev->hard_header_len > PPP_HDRLEN)
1473 skb_reserve(new_skb,
1474 ppp->dev->hard_header_len - PPP_HDRLEN);
1475
1476 /* compressor still expects A/C bytes in hdr */
1477 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1478 new_skb->data, skb->len + 2,
1479 compressor_skb_size);
1480 if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1481 consume_skb(skb);
1482 skb = new_skb;
1483 skb_put(skb, len);
1484 skb_pull(skb, 2); /* pull off A/C bytes */
1485 } else if (len == 0) {
1486 /* didn't compress, or CCP not up yet */
1487 consume_skb(new_skb);
1488 new_skb = skb;
1489 } else {
1490 /*
1491 * (len < 0)
1492 * MPPE requires that we do not send unencrypted
1493 * frames. The compressor will return -1 if we
1494 * should drop the frame. We cannot simply test
1495 * the compress_proto because MPPE and MPPC share
1496 * the same number.
1497 */
1498 if (net_ratelimit())
1499 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1500 kfree_skb(skb);
1501 consume_skb(new_skb);
1502 new_skb = NULL;
1503 }
1504 return new_skb;
1505}
1506
1507/*
1508 * Compress and send a frame.
1509 * The caller should have locked the xmit path,
1510 * and xmit_pending should be 0.
1511 */
1512static void
1513ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1514{
1515 int proto = PPP_PROTO(skb);
1516 struct sk_buff *new_skb;
1517 int len;
1518 unsigned char *cp;
1519
1520 if (proto < 0x8000) {
1521#ifdef CONFIG_PPP_FILTER
1522 /* check if we should pass this packet */
1523 /* the filter instructions are constructed assuming
1524 a four-byte PPP header on each packet */
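		/* The two pushed bytes stand in for the address/control
		 * fields so the filter sees the expected 4-byte header;
		 * the leading byte is set to 1 here (the receive path
		 * pushes 0) so that direction-sensitive filters can tell
		 * outbound frames from inbound ones.
		 */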
1525 *(u8 *)skb_push(skb, 2) = 1;
1526 if (ppp->pass_filter &&
1527 BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
1528 if (ppp->debug & 1)
1529 netdev_printk(KERN_DEBUG, ppp->dev,
1530 "PPP: outbound frame "
1531 "not passed\n");
1532 kfree_skb(skb);
1533 return;
1534 }
1535 /* if this packet passes the active filter, record the time */
1536 if (!(ppp->active_filter &&
1537 BPF_PROG_RUN(ppp->active_filter, skb) == 0))
1538 ppp->last_xmit = jiffies;
1539 skb_pull(skb, 2);
1540#else
1541 /* for data packets, record the time */
1542 ppp->last_xmit = jiffies;
1543#endif /* CONFIG_PPP_FILTER */
1544 }
1545
1546 ++ppp->stats64.tx_packets;
1547 ppp->stats64.tx_bytes += skb->len - 2;
1548
1549 switch (proto) {
1550 case PPP_IP:
1551 if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1552 break;
1553 /* try to do VJ TCP header compression */
1554 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1555 GFP_ATOMIC);
1556 if (!new_skb) {
1557 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1558 goto drop;
1559 }
1560 skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1561 cp = skb->data + 2;
1562 len = slhc_compress(ppp->vj, cp, skb->len - 2,
1563 new_skb->data + 2, &cp,
1564 !(ppp->flags & SC_NO_TCP_CCID));
1565 if (cp == skb->data + 2) {
1566 /* didn't compress */
1567 consume_skb(new_skb);
1568 } else {
1569 if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1570 proto = PPP_VJC_COMP;
1571 cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1572 } else {
1573 proto = PPP_VJC_UNCOMP;
1574 cp[0] = skb->data[2];
1575 }
1576 consume_skb(skb);
1577 skb = new_skb;
1578 cp = skb_put(skb, len + 2);
1579 cp[0] = 0;
1580 cp[1] = proto;
1581 }
1582 break;
1583
1584 case PPP_CCP:
1585 /* peek at outbound CCP frames */
1586 ppp_ccp_peek(ppp, skb, 0);
1587 break;
1588 }
1589
1590 /* try to do packet compression */
1591 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1592 proto != PPP_LCP && proto != PPP_CCP) {
1593 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1594 if (net_ratelimit())
1595 netdev_err(ppp->dev,
1596 "ppp: compression required but "
1597 "down - pkt dropped.\n");
1598 goto drop;
1599 }
1600 skb = pad_compress_skb(ppp, skb);
1601 if (!skb)
1602 goto drop;
1603 }
1604
1605 /*
1606 * If we are waiting for traffic (demand dialling),
1607 * queue it up for pppd to receive.
1608 */
1609 if (ppp->flags & SC_LOOP_TRAFFIC) {
1610 if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1611 goto drop;
1612 skb_queue_tail(&ppp->file.rq, skb);
1613 wake_up_interruptible(&ppp->file.rwait);
1614 return;
1615 }
1616
1617 ppp->xmit_pending = skb;
1618 ppp_push(ppp);
1619 return;
1620
1621 drop:
1622 kfree_skb(skb);
1623 ++ppp->dev->stats.tx_errors;
1624}
1625
1626/*
1627 * Try to send the frame in xmit_pending.
1628 * The caller should have the xmit path locked.
1629 */
1630static void
1631ppp_push(struct ppp *ppp)
1632{
1633 struct list_head *list;
1634 struct channel *pch;
1635 struct sk_buff *skb = ppp->xmit_pending;
1636
1637 if (!skb)
1638 return;
1639
1640 list = &ppp->channels;
1641 if (list_empty(list)) {
1642 /* nowhere to send the packet, just drop it */
1643 ppp->xmit_pending = NULL;
1644 kfree_skb(skb);
1645 return;
1646 }
1647
1648 if ((ppp->flags & SC_MULTILINK) == 0) {
1649 /* not doing multilink: send it down the first channel */
1650 list = list->next;
1651 pch = list_entry(list, struct channel, clist);
1652
1653 spin_lock(&pch->downl);
1654 if (pch->chan) {
1655 if (pch->chan->ops->start_xmit(pch->chan, skb))
1656 ppp->xmit_pending = NULL;
1657 } else {
1658 /* channel got unregistered */
1659 kfree_skb(skb);
1660 ppp->xmit_pending = NULL;
1661 }
1662 spin_unlock(&pch->downl);
1663 return;
1664 }
1665
1666#ifdef CONFIG_PPP_MULTILINK
1667 /* Multilink: fragment the packet over as many links
1668 as can take the packet at the moment. */
1669 if (!ppp_mp_explode(ppp, skb))
1670 return;
1671#endif /* CONFIG_PPP_MULTILINK */
1672
1673 ppp->xmit_pending = NULL;
1674 kfree_skb(skb);
1675}
1676
1677#ifdef CONFIG_PPP_MULTILINK
1678static bool mp_protocol_compress __read_mostly = true;
1679module_param(mp_protocol_compress, bool, 0644);
1680MODULE_PARM_DESC(mp_protocol_compress,
1681 "compress protocol id in multilink fragments");
1682
1683/*
1684 * Divide a packet to be transmitted into fragments and
1685 * send them out the individual links.
1686 */
1687static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1688{
1689 int len, totlen;
1690 int i, bits, hdrlen, mtu;
1691 int flen;
1692 int navail, nfree, nzero;
1693 int nbigger;
1694 int totspeed;
1695 int totfree;
1696 unsigned char *p, *q;
1697 struct list_head *list;
1698 struct channel *pch;
1699 struct sk_buff *frag;
1700 struct ppp_channel *chan;
1701
1702 totspeed = 0; /*total bitrate of the bundle*/
1703 nfree = 0; /* # channels which have no packet already queued */
1704 navail = 0; /* total # of usable channels (not deregistered) */
1705 nzero = 0; /* number of channels with zero speed associated*/
1706 totfree = 0; /*total # of channels available and
1707 *having no queued packets before
1708 *starting the fragmentation*/
1709
1710 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1711 i = 0;
1712 list_for_each_entry(pch, &ppp->channels, clist) {
1713 if (pch->chan) {
1714 pch->avail = 1;
1715 navail++;
1716 pch->speed = pch->chan->speed;
1717 } else {
1718 pch->avail = 0;
1719 }
1720 if (pch->avail) {
1721 if (skb_queue_empty(&pch->file.xq) ||
1722 !pch->had_frag) {
1723 if (pch->speed == 0)
1724 nzero++;
1725 else
1726 totspeed += pch->speed;
1727
1728 pch->avail = 2;
1729 ++nfree;
1730 ++totfree;
1731 }
1732 if (!pch->had_frag && i < ppp->nxchan)
1733 ppp->nxchan = i;
1734 }
1735 ++i;
1736 }
1737 /*
1738 * Don't start sending this packet unless at least half of
1739 * the channels are free. This gives much better TCP
1740 * performance if we have a lot of channels.
1741 */
1742 if (nfree == 0 || nfree < navail / 2)
1743 return 0; /* can't take now, leave it in xmit_pending */
1744
1745 /* Do protocol field compression */
1746 p = skb->data;
1747 len = skb->len;
1748 if (*p == 0 && mp_protocol_compress) {
1749 ++p;
1750 --len;
1751 }
1752
1753 totlen = len;
1754 nbigger = len % nfree;
1755
1756 /* skip to the channel after the one we last used
1757 and start at that one */
1758 list = &ppp->channels;
1759 for (i = 0; i < ppp->nxchan; ++i) {
1760 list = list->next;
1761 if (list == &ppp->channels) {
1762 i = 0;
1763 break;
1764 }
1765 }
1766
1767 /* create a fragment for each channel */
1768 bits = B;
1769 while (len > 0) {
1770 list = list->next;
1771 if (list == &ppp->channels) {
1772 i = 0;
1773 continue;
1774 }
1775 pch = list_entry(list, struct channel, clist);
1776 ++i;
1777 if (!pch->avail)
1778 continue;
1779
1780 /*
1781 * Skip this channel if it has a fragment pending already and
1782 * we haven't given a fragment to all of the free channels.
1783 */
1784 if (pch->avail == 1) {
1785 if (nfree > 0)
1786 continue;
1787 } else {
1788 pch->avail = 1;
1789 }
1790
1791 /* check the channel's mtu and whether it is still attached. */
1792 spin_lock(&pch->downl);
1793 if (pch->chan == NULL) {
1794 /* can't use this channel, it's being deregistered */
1795 if (pch->speed == 0)
1796 nzero--;
1797 else
1798 totspeed -= pch->speed;
1799
1800 spin_unlock(&pch->downl);
1801 pch->avail = 0;
1802 totlen = len;
1803 totfree--;
1804 nfree--;
1805 if (--navail == 0)
1806 break;
1807 continue;
1808 }
1809
1810 /*
1811 *if the channel speed is not set divide
1812 *the packet evenly among the free channels;
1813 *otherwise divide it according to the speed
1814 *of the channel we are going to transmit on
1815 */
1816 flen = len;
1817 if (nfree > 0) {
1818 if (pch->speed == 0) {
1819 flen = len/nfree;
1820 if (nbigger > 0) {
1821 flen++;
1822 nbigger--;
1823 }
1824 } else {
1825 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1826 ((totspeed*totfree)/pch->speed)) - hdrlen;
1827 if (nbigger > 0) {
1828 flen += ((totfree - nzero)*pch->speed)/totspeed;
1829 nbigger -= ((totfree - nzero)*pch->speed)/
1830 totspeed;
1831 }
1832 }
1833 nfree--;
1834 }
1835
		/*
		 *check if we are on the last channel or
		 *we exceeded the length of the data to
		 *fragment
		 */
1841 if ((nfree <= 0) || (flen > len))
1842 flen = len;
		/*
		 *it is not worth transmitting on slow channels:
		 *in that case the flen resulting from the above
		 *formula will be equal to or less than zero.
		 *Skip the channel in this case
		 */
1849 if (flen <= 0) {
1850 pch->avail = 2;
1851 spin_unlock(&pch->downl);
1852 continue;
1853 }
1854
1855 /*
1856 * hdrlen includes the 2-byte PPP protocol field, but the
1857 * MTU counts only the payload excluding the protocol field.
1858 * (RFC1661 Section 2)
1859 */
1860 mtu = pch->chan->mtu - (hdrlen - 2);
1861 if (mtu < 4)
1862 mtu = 4;
1863 if (flen > mtu)
1864 flen = mtu;
1865 if (flen == len)
1866 bits |= E;
1867 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1868 if (!frag)
1869 goto noskb;
1870 q = skb_put(frag, flen + hdrlen);
1871
1872 /* make the MP header */
1873 put_unaligned_be16(PPP_MP, q);
1874 if (ppp->flags & SC_MP_XSHORTSEQ) {
1875 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1876 q[3] = ppp->nxseq;
1877 } else {
1878 q[2] = bits;
1879 q[3] = ppp->nxseq >> 16;
1880 q[4] = ppp->nxseq >> 8;
1881 q[5] = ppp->nxseq;
1882 }
1883
1884 memcpy(q + hdrlen, p, flen);
1885
1886 /* try to send it down the channel */
1887 chan = pch->chan;
1888 if (!skb_queue_empty(&pch->file.xq) ||
1889 !chan->ops->start_xmit(chan, frag))
1890 skb_queue_tail(&pch->file.xq, frag);
1891 pch->had_frag = 1;
1892 p += flen;
1893 len -= flen;
1894 ++ppp->nxseq;
1895 bits = 0;
1896 spin_unlock(&pch->downl);
1897 }
1898 ppp->nxchan = i;
1899
1900 return 1;
1901
1902 noskb:
1903 spin_unlock(&pch->downl);
1904 if (ppp->debug & 1)
1905 netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1906 ++ppp->dev->stats.tx_errors;
1907 ++ppp->nxseq;
1908 return 1; /* abandon the frame */
1909}
1910#endif /* CONFIG_PPP_MULTILINK */
1911
1912/* Try to send data out on a channel */
1913static void __ppp_channel_push(struct channel *pch)
1914{
1915 struct sk_buff *skb;
1916 struct ppp *ppp;
1917
1918 spin_lock(&pch->downl);
1919 if (pch->chan) {
1920 while (!skb_queue_empty(&pch->file.xq)) {
1921 skb = skb_dequeue(&pch->file.xq);
1922 if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
1923 /* put the packet back and try again later */
1924 skb_queue_head(&pch->file.xq, skb);
1925 break;
1926 }
1927 }
1928 } else {
1929 /* channel got deregistered */
1930 skb_queue_purge(&pch->file.xq);
1931 }
1932 spin_unlock(&pch->downl);
1933 /* see if there is anything from the attached unit to be sent */
1934 if (skb_queue_empty(&pch->file.xq)) {
1935 ppp = pch->ppp;
1936 if (ppp)
1937 __ppp_xmit_process(ppp, NULL);
1938 }
1939}
1940
1941static void ppp_channel_push(struct channel *pch)
1942{
1943 read_lock_bh(&pch->upl);
1944 if (pch->ppp) {
1945 (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
1946 __ppp_channel_push(pch);
1947 (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
1948 } else {
1949 __ppp_channel_push(pch);
1950 }
1951 read_unlock_bh(&pch->upl);
1952}
1953
1954/*
1955 * Receive-side routines.
1956 */
1957
1958struct ppp_mp_skb_parm {
1959 u32 sequence;
1960 u8 BEbits;
1961};
1962#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
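/*
 * The multilink receive path stashes each fragment's sequence number and
 * B/E bits in the skb control buffer via PPP_MP_CB(), so that
 * ppp_mp_insert() and ppp_mp_reconstruct() can reorder and reassemble
 * fragments without re-parsing the MP header.
 */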
1963
1964static inline void
1965ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1966{
1967 ppp_recv_lock(ppp);
1968 if (!ppp->closing)
1969 ppp_receive_frame(ppp, skb, pch);
1970 else
1971 kfree_skb(skb);
1972 ppp_recv_unlock(ppp);
1973}
1974
/**
1976 * __ppp_decompress_proto - Decompress protocol field, slim version.
1977 * @skb: Socket buffer where protocol field should be decompressed. It must have
1978 * at least 1 byte of head room and 1 byte of linear data. First byte of
1979 * data must be a protocol field byte.
1980 *
1981 * Decompress protocol field in PPP header if it's compressed, e.g. when
1982 * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
1983 * length are done in this function.
1984 */
1985static void __ppp_decompress_proto(struct sk_buff *skb)
1986{
1987 if (skb->data[0] & 0x01)
1988 *(u8 *)skb_push(skb, 1) = 0x00;
1989}
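/*
 * Example: with Protocol-Field-Compression negotiated, an IPv4 frame can
 * arrive with the single protocol byte 0x21; pushing a leading 0x00
 * restores the full 0x0021 protocol field that the rest of the code
 * (e.g. PPP_PROTO()) expects.
 */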
1990
1991/**
1992 * ppp_decompress_proto - Check skb data room and decompress protocol field.
1993 * @skb: Socket buffer where protocol field should be decompressed. First byte
1994 * of data must be a protocol field byte.
1995 *
1996 * Decompress protocol field in PPP header if it's compressed, e.g. when
1997 * Protocol-Field-Compression (PFC) was negotiated. This function also makes
1998 * sure that skb data room is sufficient for Protocol field, before and after
1999 * decompression.
2000 *
2001 * Return: true - decompressed successfully, false - not enough room in skb.
2002 */
2003static bool ppp_decompress_proto(struct sk_buff *skb)
2004{
2005 /* At least one byte should be present (if protocol is compressed) */
2006 if (!pskb_may_pull(skb, 1))
2007 return false;
2008
2009 __ppp_decompress_proto(skb);
2010
2011 /* Protocol field should occupy 2 bytes when not compressed */
2012 return pskb_may_pull(skb, 2);
2013}
2014
void
2016ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
2017{
2018 struct channel *pch = chan->ppp;
2019 int proto;
2020
2021 if (!pch) {
2022 kfree_skb(skb);
2023 return;
2024 }
2025
2026 read_lock_bh(&pch->upl);
	if (!ppp_decompress_proto(skb)) {
		kfree_skb(skb);
2029 if (pch->ppp) {
2030 ++pch->ppp->dev->stats.rx_length_errors;
2031 ppp_receive_error(pch->ppp);
2032 }
2033 goto done;
2034 }
2035
2036 proto = PPP_PROTO(skb);
2037 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
2038 /* put it on the channel queue */
2039 skb_queue_tail(&pch->file.rq, skb);
2040 /* drop old frames if queue too long */
2041 while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
2042 (skb = skb_dequeue(&pch->file.rq)))
2043 kfree_skb(skb);
2044 wake_up_interruptible(&pch->file.rwait);
2045 } else {
2046 ppp_do_recv(pch->ppp, skb, pch);
2047 }
2048
2049done:
2050 read_unlock_bh(&pch->upl);
2051}
2052
2053/* Put a 0-length skb in the receive queue as an error indication */
2054void
2055ppp_input_error(struct ppp_channel *chan, int code)
2056{
2057 struct channel *pch = chan->ppp;
2058 struct sk_buff *skb;
2059
2060 if (!pch)
2061 return;
2062
2063 read_lock_bh(&pch->upl);
2064 if (pch->ppp) {
2065 skb = alloc_skb(0, GFP_ATOMIC);
2066 if (skb) {
2067 skb->len = 0; /* probably unnecessary */
2068 skb->cb[0] = code;
2069 ppp_do_recv(pch->ppp, skb, pch);
2070 }
2071 }
2072 read_unlock_bh(&pch->upl);
2073}
2074
2075/*
2076 * We come in here to process a received frame.
2077 * The receive side of the ppp unit is locked.
2078 */
2079static void
2080ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2081{
2082 /* note: a 0-length skb is used as an error indication */
2083 if (skb->len > 0) {
2084 skb_checksum_complete_unset(skb);
2085#ifdef CONFIG_PPP_MULTILINK
2086 /* XXX do channel-level decompression here */
2087 if (PPP_PROTO(skb) == PPP_MP)
2088 ppp_receive_mp_frame(ppp, skb, pch);
2089 else
2090#endif /* CONFIG_PPP_MULTILINK */
2091 ppp_receive_nonmp_frame(ppp, skb);
2092 } else {
2093 kfree_skb(skb);
2094 ppp_receive_error(ppp);
2095 }
2096}
2097
2098static void
2099ppp_receive_error(struct ppp *ppp)
2100{
2101 ++ppp->dev->stats.rx_errors;
2102 if (ppp->vj)
2103 slhc_toss(ppp->vj);
2104}
2105
2106static void
2107ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
2108{
2109 struct sk_buff *ns;
2110 int proto, len, npi;
2111
2112 /*
2113 * Decompress the frame, if compressed.
2114 * Note that some decompressors need to see uncompressed frames
2115 * that come in as well as compressed frames.
2116 */
2117 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
2118 (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
2119 skb = ppp_decompress_frame(ppp, skb);
2120
2121 if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
2122 goto err;
2123
David Brazdil0f672f62019-12-10 10:32:29 +00002124 /* At this point the "Protocol" field MUST be decompressed, either in
2125 * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
2126 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002127 proto = PPP_PROTO(skb);
2128 switch (proto) {
2129 case PPP_VJC_COMP:
2130 /* decompress VJ compressed packets */
2131 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2132 goto err;
2133
2134 if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
2135 /* copy to a new sk_buff with more tailroom */
2136 ns = dev_alloc_skb(skb->len + 128);
2137 if (!ns) {
2138 netdev_err(ppp->dev, "PPP: no memory "
2139 "(VJ decomp)\n");
2140 goto err;
2141 }
2142 skb_reserve(ns, 2);
2143 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
2144 consume_skb(skb);
2145 skb = ns;
2146 }
2147 else
2148 skb->ip_summed = CHECKSUM_NONE;
2149
2150 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
2151 if (len <= 0) {
2152 netdev_printk(KERN_DEBUG, ppp->dev,
2153 "PPP: VJ decompression error\n");
2154 goto err;
2155 }
2156 len += 2;
2157 if (len > skb->len)
2158 skb_put(skb, len - skb->len);
2159 else if (len < skb->len)
2160 skb_trim(skb, len);
2161 proto = PPP_IP;
2162 break;
2163
2164 case PPP_VJC_UNCOMP:
2165 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2166 goto err;
2167
2168		/* Until we fix the decompressor, we need to make sure
2169		 * the data portion is linear.
2170 */
2171 if (!pskb_may_pull(skb, skb->len))
2172 goto err;
2173
2174 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
2175 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
2176 goto err;
2177 }
2178 proto = PPP_IP;
2179 break;
2180
2181 case PPP_CCP:
2182 ppp_ccp_peek(ppp, skb, 1);
2183 break;
2184 }
2185
2186 ++ppp->stats64.rx_packets;
2187 ppp->stats64.rx_bytes += skb->len - 2;
2188
2189 npi = proto_to_npindex(proto);
2190 if (npi < 0) {
2191 /* control or unknown frame - pass it to pppd */
2192 skb_queue_tail(&ppp->file.rq, skb);
2193 /* limit queue length by dropping old frames */
2194 while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
2195 (skb = skb_dequeue(&ppp->file.rq)))
2196 kfree_skb(skb);
2197 /* wake up any process polling or blocking on read */
2198 wake_up_interruptible(&ppp->file.rwait);
2199
2200 } else {
2201 /* network protocol frame - give it to the kernel */
2202
2203#ifdef CONFIG_PPP_FILTER
2204 /* check if the packet passes the pass and active filters */
2205 /* the filter instructions are constructed assuming
2206 a four-byte PPP header on each packet */
2207 if (ppp->pass_filter || ppp->active_filter) {
2208 if (skb_unclone(skb, GFP_ATOMIC))
2209 goto err;
2210
2211 *(u8 *)skb_push(skb, 2) = 0;
2212 if (ppp->pass_filter &&
2213 BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
2214 if (ppp->debug & 1)
2215 netdev_printk(KERN_DEBUG, ppp->dev,
2216 "PPP: inbound frame "
2217 "not passed\n");
2218 kfree_skb(skb);
2219 return;
2220 }
2221 if (!(ppp->active_filter &&
2222 BPF_PROG_RUN(ppp->active_filter, skb) == 0))
2223 ppp->last_recv = jiffies;
2224 __skb_pull(skb, 2);
2225 } else
2226#endif /* CONFIG_PPP_FILTER */
2227 ppp->last_recv = jiffies;
2228
2229 if ((ppp->dev->flags & IFF_UP) == 0 ||
2230 ppp->npmode[npi] != NPMODE_PASS) {
2231 kfree_skb(skb);
2232 } else {
2233 /* chop off protocol */
2234 skb_pull_rcsum(skb, 2);
2235 skb->dev = ppp->dev;
2236 skb->protocol = htons(npindex_to_ethertype[npi]);
2237 skb_reset_mac_header(skb);
2238 skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
2239 dev_net(ppp->dev)));
2240 netif_rx(skb);
2241 }
2242 }
2243 return;
2244
2245 err:
2246 kfree_skb(skb);
2247 ppp_receive_error(ppp);
2248}
2249
2250static struct sk_buff *
2251ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
2252{
2253 int proto = PPP_PROTO(skb);
2254 struct sk_buff *ns;
2255 int len;
2256
2257	/* Until we fix all the decompressors, we need to make sure
2258	 * the data portion is linear.
2259 */
2260 if (!pskb_may_pull(skb, skb->len))
2261 goto err;
2262
2263 if (proto == PPP_COMP) {
2264 int obuff_size;
2265
2266 switch(ppp->rcomp->compress_proto) {
2267 case CI_MPPE:
2268 obuff_size = ppp->mru + PPP_HDRLEN + 1;
2269 break;
2270 default:
2271 obuff_size = ppp->mru + PPP_HDRLEN;
2272 break;
2273 }
2274
2275 ns = dev_alloc_skb(obuff_size);
2276 if (!ns) {
2277 netdev_err(ppp->dev, "ppp_decompress_frame: "
2278 "no memory\n");
2279 goto err;
2280 }
2281 /* the decompressor still expects the A/C bytes in the hdr */
2282 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
2283 skb->len + 2, ns->data, obuff_size);
2284 if (len < 0) {
2285 /* Pass the compressed frame to pppd as an
2286 error indication. */
2287 if (len == DECOMP_FATALERROR)
2288 ppp->rstate |= SC_DC_FERROR;
2289 kfree_skb(ns);
2290 goto err;
2291 }
2292
2293 consume_skb(skb);
2294 skb = ns;
2295 skb_put(skb, len);
2296 skb_pull(skb, 2); /* pull off the A/C bytes */
2297
David Brazdil0f672f62019-12-10 10:32:29 +00002298 /* Don't call __ppp_decompress_proto() here, but instead rely on
2299 * corresponding algo (mppe/bsd/deflate) to decompress it.
2300 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002301 } else {
2302 /* Uncompressed frame - pass to decompressor so it
2303 can update its dictionary if necessary. */
2304 if (ppp->rcomp->incomp)
2305 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
2306 skb->len + 2);
2307 }
2308
2309 return skb;
2310
2311 err:
2312 ppp->rstate |= SC_DC_ERROR;
2313 ppp_receive_error(ppp);
2314 return skb;
2315}
2316
2317#ifdef CONFIG_PPP_MULTILINK
2318/*
2319 * Receive a multilink frame.
2320 * We put it on the reconstruction queue and then pull off
2321 * as many completed frames as we can.
2322 */
2323static void
2324ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2325{
2326 u32 mask, seq;
2327 struct channel *ch;
2328 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
2329
2330 if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
2331 goto err; /* no good, throw it away */
2332
2333 /* Decode sequence number and begin/end bits */
2334 if (ppp->flags & SC_MP_SHORTSEQ) {
2335 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
2336 mask = 0xfff;
2337 } else {
2338 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
2339 mask = 0xffffff;
2340 }
2341 PPP_MP_CB(skb)->BEbits = skb->data[2];
2342 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
2343
2344 /*
2345 * Do protocol ID decompression on the first fragment of each packet.
David Brazdil0f672f62019-12-10 10:32:29 +00002346 * We have to do that here, because ppp_receive_nonmp_frame() expects
2347	 * a decompressed protocol field.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002348 */
David Brazdil0f672f62019-12-10 10:32:29 +00002349 if (PPP_MP_CB(skb)->BEbits & B)
2350 __ppp_decompress_proto(skb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002351
2352 /*
2353 * Expand sequence number to 32 bits, making it as close
2354 * as possible to ppp->minseq.
2355 */
2356 seq |= ppp->minseq & ~mask;
2357 if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
2358 seq += mask + 1;
2359 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
2360 seq -= mask + 1; /* should never happen */
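	/*
	 * Worked example (short sequence numbers, so mask = 0xfff): if
	 * ppp->minseq is 0x12ffe and the received 12-bit field is 0x001,
	 * seq starts out as 0x12001; minseq - seq = 0xffd exceeds
	 * mask >> 1, so we add 0x1000 and get 0x13001, the 32-bit value
	 * closest to minseq that matches the received low-order bits.
	 */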
2361 PPP_MP_CB(skb)->sequence = seq;
2362 pch->lastseq = seq;
2363
2364 /*
2365 * If this packet comes before the next one we were expecting,
2366 * drop it.
2367 */
2368 if (seq_before(seq, ppp->nextseq)) {
2369 kfree_skb(skb);
2370 ++ppp->dev->stats.rx_dropped;
2371 ppp_receive_error(ppp);
2372 return;
2373 }
2374
2375 /*
2376 * Reevaluate minseq, the minimum over all channels of the
2377 * last sequence number received on each channel. Because of
2378 * the increasing sequence number rule, we know that any fragment
2379 * before `minseq' which hasn't arrived is never going to arrive.
2380 * The list of channels can't change because we have the receive
2381 * side of the ppp unit locked.
2382 */
2383 list_for_each_entry(ch, &ppp->channels, clist) {
2384 if (seq_before(ch->lastseq, seq))
2385 seq = ch->lastseq;
2386 }
2387 if (seq_before(ppp->minseq, seq))
2388 ppp->minseq = seq;
2389
2390 /* Put the fragment on the reconstruction queue */
2391 ppp_mp_insert(ppp, skb);
2392
2393 /* If the queue is getting long, don't wait any longer for packets
2394 before the start of the queue. */
2395 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
2396 struct sk_buff *mskb = skb_peek(&ppp->mrq);
2397 if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
2398 ppp->minseq = PPP_MP_CB(mskb)->sequence;
2399 }
2400
2401 /* Pull completed packets off the queue and receive them. */
2402 while ((skb = ppp_mp_reconstruct(ppp))) {
2403 if (pskb_may_pull(skb, 2))
2404 ppp_receive_nonmp_frame(ppp, skb);
2405 else {
2406 ++ppp->dev->stats.rx_length_errors;
2407 kfree_skb(skb);
2408 ppp_receive_error(ppp);
2409 }
2410 }
2411
2412 return;
2413
2414 err:
2415 kfree_skb(skb);
2416 ppp_receive_error(ppp);
2417}
2418
2419/*
2420 * Insert a fragment on the MP reconstruction queue.
2421 * The queue is ordered by increasing sequence number.
2422 */
2423static void
2424ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
2425{
2426 struct sk_buff *p;
2427 struct sk_buff_head *list = &ppp->mrq;
2428 u32 seq = PPP_MP_CB(skb)->sequence;
2429
2430 /* N.B. we don't need to lock the list lock because we have the
2431 ppp unit receive-side lock. */
2432 skb_queue_walk(list, p) {
2433 if (seq_before(seq, PPP_MP_CB(p)->sequence))
2434 break;
2435 }
2436 __skb_queue_before(list, p, skb);
2437}
2438
2439/*
2440 * Reconstruct a packet from the MP fragment queue.
2441 * We go through increasing sequence numbers until we find a
2442 * complete packet, or we get to the sequence number for a fragment
2443 * which hasn't arrived but might still do so.
2444 */
2445static struct sk_buff *
2446ppp_mp_reconstruct(struct ppp *ppp)
2447{
2448 u32 seq = ppp->nextseq;
2449 u32 minseq = ppp->minseq;
2450 struct sk_buff_head *list = &ppp->mrq;
2451 struct sk_buff *p, *tmp;
2452 struct sk_buff *head, *tail;
2453 struct sk_buff *skb = NULL;
2454 int lost = 0, len = 0;
2455
2456 if (ppp->mrru == 0) /* do nothing until mrru is set */
2457 return NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00002458 head = __skb_peek(list);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002459 tail = NULL;
2460 skb_queue_walk_safe(list, p, tmp) {
2461 again:
2462 if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2463 /* this can't happen, anyway ignore the skb */
2464 netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2465 "seq %u < %u\n",
2466 PPP_MP_CB(p)->sequence, seq);
2467 __skb_unlink(p, list);
2468 kfree_skb(p);
2469 continue;
2470 }
2471 if (PPP_MP_CB(p)->sequence != seq) {
2472 u32 oldseq;
2473 /* Fragment `seq' is missing. If it is after
2474 minseq, it might arrive later, so stop here. */
2475 if (seq_after(seq, minseq))
2476 break;
2477 /* Fragment `seq' is lost, keep going. */
2478 lost = 1;
2479 oldseq = seq;
2480 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
2481 minseq + 1: PPP_MP_CB(p)->sequence;
2482
2483 if (ppp->debug & 1)
2484 netdev_printk(KERN_DEBUG, ppp->dev,
2485 "lost frag %u..%u\n",
2486 oldseq, seq-1);
2487
2488 goto again;
2489 }
2490
2491 /*
2492 * At this point we know that all the fragments from
2493 * ppp->nextseq to seq are either present or lost.
2494 * Also, there are no complete packets in the queue
2495 * that have no missing fragments and end before this
2496 * fragment.
2497 */
2498
2499 /* B bit set indicates this fragment starts a packet */
2500 if (PPP_MP_CB(p)->BEbits & B) {
2501 head = p;
2502 lost = 0;
2503 len = 0;
2504 }
2505
2506 len += p->len;
2507
2508 /* Got a complete packet yet? */
2509 if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2510 (PPP_MP_CB(head)->BEbits & B)) {
2511 if (len > ppp->mrru + 2) {
2512 ++ppp->dev->stats.rx_length_errors;
2513 netdev_printk(KERN_DEBUG, ppp->dev,
2514 "PPP: reconstructed packet"
2515 " is too long (%d)\n", len);
2516 } else {
2517 tail = p;
2518 break;
2519 }
2520 ppp->nextseq = seq + 1;
2521 }
2522
2523 /*
2524 * If this is the ending fragment of a packet,
2525 * and we haven't found a complete valid packet yet,
2526 * we can discard up to and including this fragment.
2527 */
2528 if (PPP_MP_CB(p)->BEbits & E) {
2529 struct sk_buff *tmp2;
2530
2531 skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2532 if (ppp->debug & 1)
2533 netdev_printk(KERN_DEBUG, ppp->dev,
2534 "discarding frag %u\n",
2535 PPP_MP_CB(p)->sequence);
2536 __skb_unlink(p, list);
2537 kfree_skb(p);
2538 }
2539 head = skb_peek(list);
2540 if (!head)
2541 break;
2542 }
2543 ++seq;
2544 }
2545
2546 /* If we have a complete packet, copy it all into one skb. */
2547 if (tail != NULL) {
2548 /* If we have discarded any fragments,
2549 signal a receive error. */
2550 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2551 skb_queue_walk_safe(list, p, tmp) {
2552 if (p == head)
2553 break;
2554 if (ppp->debug & 1)
2555 netdev_printk(KERN_DEBUG, ppp->dev,
2556 "discarding frag %u\n",
2557 PPP_MP_CB(p)->sequence);
2558 __skb_unlink(p, list);
2559 kfree_skb(p);
2560 }
2561
2562 if (ppp->debug & 1)
2563 netdev_printk(KERN_DEBUG, ppp->dev,
2564 " missed pkts %u..%u\n",
2565 ppp->nextseq,
2566 PPP_MP_CB(head)->sequence-1);
2567 ++ppp->dev->stats.rx_dropped;
2568 ppp_receive_error(ppp);
2569 }
2570
2571 skb = head;
2572 if (head != tail) {
2573 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2574 p = skb_queue_next(list, head);
2575 __skb_unlink(skb, list);
2576 skb_queue_walk_from_safe(list, p, tmp) {
2577 __skb_unlink(p, list);
2578 *fragpp = p;
2579 p->next = NULL;
2580 fragpp = &p->next;
2581
2582 skb->len += p->len;
2583 skb->data_len += p->len;
2584 skb->truesize += p->truesize;
2585
2586 if (p == tail)
2587 break;
2588 }
2589 } else {
2590 __skb_unlink(skb, list);
2591 }
2592
2593 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2594 }
2595
2596 return skb;
2597}
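
/*
 * Worked example of the walk above (an illustration, not code): fragments
 * queued as 5(B), 6, 7(E) are complete, so head = 5, tail = 7 and they are
 * merged into a single skb via the frag_list.  If fragment 6 never arrives
 * and minseq advances past it, 6 is known to be lost; the E bit on 7 then
 * lets the walk discard 5..7 so reassembly can resume at the next B
 * fragment.
 */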
2598#endif /* CONFIG_PPP_MULTILINK */
2599
2600/*
2601 * Channel interface.
2602 */
2603
2604/* Create a new, unattached ppp channel. */
2605int ppp_register_channel(struct ppp_channel *chan)
2606{
2607 return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2608}
2609
2610/* Create a new, unattached ppp channel for specified net. */
2611int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2612{
2613 struct channel *pch;
2614 struct ppp_net *pn;
2615
2616 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2617 if (!pch)
2618 return -ENOMEM;
2619
2620 pn = ppp_pernet(net);
2621
2622 pch->ppp = NULL;
2623 pch->chan = chan;
2624 pch->chan_net = get_net(net);
2625 chan->ppp = pch;
2626 init_ppp_file(&pch->file, CHANNEL);
2627 pch->file.hdrlen = chan->hdrlen;
2628#ifdef CONFIG_PPP_MULTILINK
2629 pch->lastseq = -1;
2630#endif /* CONFIG_PPP_MULTILINK */
2631 init_rwsem(&pch->chan_sem);
2632 spin_lock_init(&pch->downl);
2633 rwlock_init(&pch->upl);
2634
2635 spin_lock_bh(&pn->all_channels_lock);
2636 pch->file.index = ++pn->last_channel_index;
2637 list_add(&pch->list, &pn->new_channels);
2638 atomic_inc(&channel_count);
2639 spin_unlock_bh(&pn->all_channels_lock);
2640
2641 return 0;
2642}
2643
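/*
 * Illustrative sketch of the registration step a channel driver performs
 * (the values and the ops table are assumptions for the example; see
 * include/linux/ppp_channel.h for the full structure).
 */
static inline int ppp_example_register(struct ppp_channel *chan,
				       const struct ppp_channel_ops *ops)
{
	chan->ops = ops;	/* start_xmit()/ioctl() callbacks */
	chan->mtu = 1500;	/* largest frame this link can carry */
	chan->hdrlen = 0;	/* extra header space the driver needs */
	return ppp_register_channel(chan);
}
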
2644/*
2645 * Return the index of a channel.
2646 */
2647int ppp_channel_index(struct ppp_channel *chan)
2648{
2649 struct channel *pch = chan->ppp;
2650
2651 if (pch)
2652 return pch->file.index;
2653 return -1;
2654}
2655
2656/*
2657 * Return the PPP unit number to which a channel is connected.
2658 */
2659int ppp_unit_number(struct ppp_channel *chan)
2660{
2661 struct channel *pch = chan->ppp;
2662 int unit = -1;
2663
2664 if (pch) {
2665 read_lock_bh(&pch->upl);
2666 if (pch->ppp)
2667 unit = pch->ppp->file.index;
2668 read_unlock_bh(&pch->upl);
2669 }
2670 return unit;
2671}
2672
2673/*
2674 * Return the PPP device interface name of a channel.
2675 */
2676char *ppp_dev_name(struct ppp_channel *chan)
2677{
2678 struct channel *pch = chan->ppp;
2679 char *name = NULL;
2680
2681 if (pch) {
2682 read_lock_bh(&pch->upl);
2683 if (pch->ppp && pch->ppp->dev)
2684 name = pch->ppp->dev->name;
2685 read_unlock_bh(&pch->upl);
2686 }
2687 return name;
2688}
2689
2690
2691/*
2692 * Disconnect a channel from the generic layer.
2693 * This must be called in process context.
2694 */
2695void
2696ppp_unregister_channel(struct ppp_channel *chan)
2697{
2698 struct channel *pch = chan->ppp;
2699 struct ppp_net *pn;
2700
2701 if (!pch)
2702 return; /* should never happen */
2703
2704 chan->ppp = NULL;
2705
2706 /*
2707 * This ensures that we have returned from any calls into the
2708	 * channel's start_xmit or ioctl routine before we proceed.
2709 */
2710 down_write(&pch->chan_sem);
2711 spin_lock_bh(&pch->downl);
2712 pch->chan = NULL;
2713 spin_unlock_bh(&pch->downl);
2714 up_write(&pch->chan_sem);
2715 ppp_disconnect_channel(pch);
2716
2717 pn = ppp_pernet(pch->chan_net);
2718 spin_lock_bh(&pn->all_channels_lock);
2719 list_del(&pch->list);
2720 spin_unlock_bh(&pn->all_channels_lock);
2721
2722 pch->file.dead = 1;
2723 wake_up_interruptible(&pch->file.rwait);
2724 if (refcount_dec_and_test(&pch->file.refcnt))
2725 ppp_destroy_channel(pch);
2726}
2727
2728/*
2729 * Callback from a channel when it can accept more to transmit.
2730 * This should be called at BH/softirq level, not interrupt level.
2731 */
2732void
2733ppp_output_wakeup(struct ppp_channel *chan)
2734{
2735 struct channel *pch = chan->ppp;
2736
2737 if (!pch)
2738 return;
2739 ppp_channel_push(pch);
2740}
2741
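/*
 * Illustrative sketch of the flow-control contract behind ppp_output_wakeup()
 * (struct ppp_example_hw and its tx_full flag are assumptions): a channel's
 * start_xmit() returns 0 without consuming the skb when it cannot take the
 * frame now, and the channel calls ppp_output_wakeup() from BH context once
 * room is available again so the generic layer retries the pending frame.
 */
struct ppp_example_hw {
	bool tx_full;		/* pretend transmit-queue-full flag */
};

static inline int ppp_example_start_xmit(struct ppp_channel *chan,
					 struct sk_buff *skb)
{
	struct ppp_example_hw *hw = chan->private;

	if (hw->tx_full)
		return 0;	/* not consumed; generic layer keeps it pending */
	consume_skb(skb);	/* a real driver would queue skb to hardware */
	return 1;		/* consumed */
}
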
2742/*
2743 * Compression control.
2744 */
2745
2746/* Process the PPPIOCSCOMPRESS ioctl. */
2747static int
2748ppp_set_compress(struct ppp *ppp, unsigned long arg)
2749{
2750 int err;
2751 struct compressor *cp, *ocomp;
2752 struct ppp_option_data data;
2753 void *state, *ostate;
2754 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2755
2756 err = -EFAULT;
2757 if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
2758 goto out;
2759 if (data.length > CCP_MAX_OPTION_LENGTH)
2760 goto out;
2761 if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length))
2762 goto out;
2763
2764 err = -EINVAL;
2765 if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length)
2766 goto out;
2767
2768 cp = try_then_request_module(
2769 find_compressor(ccp_option[0]),
2770 "ppp-compress-%d", ccp_option[0]);
2771 if (!cp)
2772 goto out;
2773
2774 err = -ENOBUFS;
2775 if (data.transmit) {
2776 state = cp->comp_alloc(ccp_option, data.length);
2777 if (state) {
2778 ppp_xmit_lock(ppp);
2779 ppp->xstate &= ~SC_COMP_RUN;
2780 ocomp = ppp->xcomp;
2781 ostate = ppp->xc_state;
2782 ppp->xcomp = cp;
2783 ppp->xc_state = state;
2784 ppp_xmit_unlock(ppp);
2785 if (ostate) {
2786 ocomp->comp_free(ostate);
2787 module_put(ocomp->owner);
2788 }
2789 err = 0;
2790 } else
2791 module_put(cp->owner);
2792
2793 } else {
2794 state = cp->decomp_alloc(ccp_option, data.length);
2795 if (state) {
2796 ppp_recv_lock(ppp);
2797 ppp->rstate &= ~SC_DECOMP_RUN;
2798 ocomp = ppp->rcomp;
2799 ostate = ppp->rc_state;
2800 ppp->rcomp = cp;
2801 ppp->rc_state = state;
2802 ppp_recv_unlock(ppp);
2803 if (ostate) {
2804 ocomp->decomp_free(ostate);
2805 module_put(ocomp->owner);
2806 }
2807 err = 0;
2808 } else
2809 module_put(cp->owner);
2810 }
2811
2812 out:
2813 return err;
2814}
2815
2816/*
2817 * Look at a CCP packet and update our state accordingly.
2818 * We assume the caller has the xmit or recv path locked.
2819 */
2820static void
2821ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2822{
2823 unsigned char *dp;
2824 int len;
2825
2826 if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2827 return; /* no header */
2828 dp = skb->data + 2;
2829
2830 switch (CCP_CODE(dp)) {
2831 case CCP_CONFREQ:
2832
2833 /* A ConfReq starts negotiation of compression
2834 * in one direction of transmission,
2835 * and hence brings it down...but which way?
2836 *
2837 * Remember:
2838 * A ConfReq indicates what the sender would like to receive
2839 */
2840		if (inbound)
2841 /* He is proposing what I should send */
2842 ppp->xstate &= ~SC_COMP_RUN;
2843 else
2844			/* I am proposing what he should send */
2845 ppp->rstate &= ~SC_DECOMP_RUN;
2846
2847 break;
2848
2849 case CCP_TERMREQ:
2850 case CCP_TERMACK:
2851 /*
2852 * CCP is going down, both directions of transmission
2853 */
2854 ppp->rstate &= ~SC_DECOMP_RUN;
2855 ppp->xstate &= ~SC_COMP_RUN;
2856 break;
2857
2858 case CCP_CONFACK:
2859 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2860 break;
2861 len = CCP_LENGTH(dp);
2862 if (!pskb_may_pull(skb, len + 2))
2863 return; /* too short */
2864 dp += CCP_HDRLEN;
2865 len -= CCP_HDRLEN;
2866 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2867 break;
2868 if (inbound) {
2869 /* we will start receiving compressed packets */
2870 if (!ppp->rc_state)
2871 break;
2872 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2873 ppp->file.index, 0, ppp->mru, ppp->debug)) {
2874 ppp->rstate |= SC_DECOMP_RUN;
2875 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2876 }
2877 } else {
2878 /* we will soon start sending compressed packets */
2879 if (!ppp->xc_state)
2880 break;
2881 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2882 ppp->file.index, 0, ppp->debug))
2883 ppp->xstate |= SC_COMP_RUN;
2884 }
2885 break;
2886
2887 case CCP_RESETACK:
2888 /* reset the [de]compressor */
2889 if ((ppp->flags & SC_CCP_UP) == 0)
2890 break;
2891 if (inbound) {
2892 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2893 ppp->rcomp->decomp_reset(ppp->rc_state);
2894 ppp->rstate &= ~SC_DC_ERROR;
2895 }
2896 } else {
2897 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2898 ppp->xcomp->comp_reset(ppp->xc_state);
2899 }
2900 break;
2901 }
2902}
2903
2904/* Free up compression resources. */
2905static void
2906ppp_ccp_closed(struct ppp *ppp)
2907{
2908 void *xstate, *rstate;
2909 struct compressor *xcomp, *rcomp;
2910
2911 ppp_lock(ppp);
2912 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2913 ppp->xstate = 0;
2914 xcomp = ppp->xcomp;
2915 xstate = ppp->xc_state;
2916 ppp->xc_state = NULL;
2917 ppp->rstate = 0;
2918 rcomp = ppp->rcomp;
2919 rstate = ppp->rc_state;
2920 ppp->rc_state = NULL;
2921 ppp_unlock(ppp);
2922
2923 if (xstate) {
2924 xcomp->comp_free(xstate);
2925 module_put(xcomp->owner);
2926 }
2927 if (rstate) {
2928 rcomp->decomp_free(rstate);
2929 module_put(rcomp->owner);
2930 }
2931}
2932
2933/* List of compressors. */
2934static LIST_HEAD(compressor_list);
2935static DEFINE_SPINLOCK(compressor_list_lock);
2936
2937struct compressor_entry {
2938 struct list_head list;
2939 struct compressor *comp;
2940};
2941
2942static struct compressor_entry *
2943find_comp_entry(int proto)
2944{
2945 struct compressor_entry *ce;
2946
2947 list_for_each_entry(ce, &compressor_list, list) {
2948 if (ce->comp->compress_proto == proto)
2949 return ce;
2950 }
2951 return NULL;
2952}
2953
2954/* Register a compressor */
2955int
2956ppp_register_compressor(struct compressor *cp)
2957{
2958 struct compressor_entry *ce;
2959 int ret;
2960 spin_lock(&compressor_list_lock);
2961 ret = -EEXIST;
2962 if (find_comp_entry(cp->compress_proto))
2963 goto out;
2964 ret = -ENOMEM;
2965 ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2966 if (!ce)
2967 goto out;
2968 ret = 0;
2969 ce->comp = cp;
2970 list_add(&ce->list, &compressor_list);
2971 out:
2972 spin_unlock(&compressor_list_lock);
2973 return ret;
2974}
2975
2976/* Unregister a compressor */
2977void
2978ppp_unregister_compressor(struct compressor *cp)
2979{
2980 struct compressor_entry *ce;
2981
2982 spin_lock(&compressor_list_lock);
2983 ce = find_comp_entry(cp->compress_proto);
2984 if (ce && ce->comp == cp) {
2985 list_del(&ce->list);
2986 kfree(ce);
2987 }
2988 spin_unlock(&compressor_list_lock);
2989}
2990
2991/* Find a compressor. */
2992static struct compressor *
2993find_compressor(int type)
2994{
2995 struct compressor_entry *ce;
2996 struct compressor *cp = NULL;
2997
2998 spin_lock(&compressor_list_lock);
2999 ce = find_comp_entry(type);
3000 if (ce) {
3001 cp = ce->comp;
3002 if (!try_module_get(cp->owner))
3003 cp = NULL;
3004 }
3005 spin_unlock(&compressor_list_lock);
3006 return cp;
3007}
3008
3009/*
3010 * Miscellaneous stuff.
3011 */
3012
3013static void
3014ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
3015{
3016 struct slcompress *vj = ppp->vj;
3017
3018 memset(st, 0, sizeof(*st));
3019 st->p.ppp_ipackets = ppp->stats64.rx_packets;
3020 st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
3021 st->p.ppp_ibytes = ppp->stats64.rx_bytes;
3022 st->p.ppp_opackets = ppp->stats64.tx_packets;
3023 st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
3024 st->p.ppp_obytes = ppp->stats64.tx_bytes;
3025 if (!vj)
3026 return;
3027 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
3028 st->vj.vjs_compressed = vj->sls_o_compressed;
3029 st->vj.vjs_searches = vj->sls_o_searches;
3030 st->vj.vjs_misses = vj->sls_o_misses;
3031 st->vj.vjs_errorin = vj->sls_i_error;
3032 st->vj.vjs_tossed = vj->sls_i_tossed;
3033 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
3034 st->vj.vjs_compressedin = vj->sls_i_compressed;
3035}
3036
3037/*
3038 * Stuff for handling the lists of ppp units and channels
3039 * and for initialization.
3040 */
3041
3042/*
3043 * Create a new ppp interface unit. Fails if it can't allocate memory
3044 * or if there is already a unit with the requested number.
3045 * unit == -1 means allocate a new number.
3046 */
3047static int ppp_create_interface(struct net *net, struct file *file, int *unit)
3048{
3049 struct ppp_config conf = {
3050 .file = file,
3051 .unit = *unit,
3052 .ifname_is_set = false,
3053 };
3054 struct net_device *dev;
3055 struct ppp *ppp;
3056 int err;
3057
3058 dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
3059 if (!dev) {
3060 err = -ENOMEM;
3061 goto err;
3062 }
3063 dev_net_set(dev, net);
3064 dev->rtnl_link_ops = &ppp_link_ops;
3065
3066 rtnl_lock();
3067
3068 err = ppp_dev_configure(net, dev, &conf);
3069 if (err < 0)
3070 goto err_dev;
3071 ppp = netdev_priv(dev);
3072 *unit = ppp->file.index;
3073
3074 rtnl_unlock();
3075
3076 return 0;
3077
3078err_dev:
3079 rtnl_unlock();
3080 free_netdev(dev);
3081err:
3082 return err;
3083}
3084
3085/*
3086 * Initialize a ppp_file structure.
3087 */
3088static void
3089init_ppp_file(struct ppp_file *pf, int kind)
3090{
3091 pf->kind = kind;
3092 skb_queue_head_init(&pf->xq);
3093 skb_queue_head_init(&pf->rq);
3094 refcount_set(&pf->refcnt, 1);
3095 init_waitqueue_head(&pf->rwait);
3096}
3097
3098/*
3099 * Free the memory used by a ppp unit. This is only called once
3100 * there are no channels connected to the unit and no file structs
3101 * that reference the unit.
3102 */
3103static void ppp_destroy_interface(struct ppp *ppp)
3104{
3105 atomic_dec(&ppp_unit_count);
3106
3107 if (!ppp->file.dead || ppp->n_channels) {
3108 /* "can't happen" */
3109 netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
3110 "but dead=%d n_channels=%d !\n",
3111 ppp, ppp->file.dead, ppp->n_channels);
3112 return;
3113 }
3114
3115 ppp_ccp_closed(ppp);
3116 if (ppp->vj) {
3117 slhc_free(ppp->vj);
3118 ppp->vj = NULL;
3119 }
3120 skb_queue_purge(&ppp->file.xq);
3121 skb_queue_purge(&ppp->file.rq);
3122#ifdef CONFIG_PPP_MULTILINK
3123 skb_queue_purge(&ppp->mrq);
3124#endif /* CONFIG_PPP_MULTILINK */
3125#ifdef CONFIG_PPP_FILTER
3126 if (ppp->pass_filter) {
3127 bpf_prog_destroy(ppp->pass_filter);
3128 ppp->pass_filter = NULL;
3129 }
3130
3131 if (ppp->active_filter) {
3132 bpf_prog_destroy(ppp->active_filter);
3133 ppp->active_filter = NULL;
3134 }
3135#endif /* CONFIG_PPP_FILTER */
3136
3137 kfree_skb(ppp->xmit_pending);
3138 free_percpu(ppp->xmit_recursion);
3139
3140 free_netdev(ppp->dev);
3141}
3142
3143/*
3144 * Locate an existing ppp unit.
3145 * The caller should have locked the all_ppp_mutex.
3146 */
3147static struct ppp *
3148ppp_find_unit(struct ppp_net *pn, int unit)
3149{
3150 return unit_find(&pn->units_idr, unit);
3151}
3152
3153/*
3154 * Locate an existing ppp channel.
3155 * The caller should have locked the all_channels_lock.
3156 * First we look in the new_channels list, then in the
3157 * all_channels list. If found in the new_channels list,
3158 * we move it to the all_channels list. This is for speed
3159 * when we have a lot of channels in use.
3160 */
3161static struct channel *
3162ppp_find_channel(struct ppp_net *pn, int unit)
3163{
3164 struct channel *pch;
3165
3166 list_for_each_entry(pch, &pn->new_channels, list) {
3167 if (pch->file.index == unit) {
3168 list_move(&pch->list, &pn->all_channels);
3169 return pch;
3170 }
3171 }
3172
3173 list_for_each_entry(pch, &pn->all_channels, list) {
3174 if (pch->file.index == unit)
3175 return pch;
3176 }
3177
3178 return NULL;
3179}
3180
3181/*
3182 * Connect a PPP channel to a PPP interface unit.
3183 */
3184static int
3185ppp_connect_channel(struct channel *pch, int unit)
3186{
3187 struct ppp *ppp;
3188 struct ppp_net *pn;
3189 int ret = -ENXIO;
3190 int hdrlen;
3191
3192 pn = ppp_pernet(pch->chan_net);
3193
3194 mutex_lock(&pn->all_ppp_mutex);
3195 ppp = ppp_find_unit(pn, unit);
3196 if (!ppp)
3197 goto out;
3198 write_lock_bh(&pch->upl);
3199 ret = -EINVAL;
3200 if (pch->ppp)
3201 goto outl;
3202
3203 ppp_lock(ppp);
3204 spin_lock_bh(&pch->downl);
3205 if (!pch->chan) {
3206 /* Don't connect unregistered channels */
3207 spin_unlock_bh(&pch->downl);
3208 ppp_unlock(ppp);
3209 ret = -ENOTCONN;
3210 goto outl;
3211 }
3212 spin_unlock_bh(&pch->downl);
3213 if (pch->file.hdrlen > ppp->file.hdrlen)
3214 ppp->file.hdrlen = pch->file.hdrlen;
3215 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
3216 if (hdrlen > ppp->dev->hard_header_len)
3217 ppp->dev->hard_header_len = hdrlen;
3218 list_add_tail(&pch->clist, &ppp->channels);
3219 ++ppp->n_channels;
3220 pch->ppp = ppp;
3221 refcount_inc(&ppp->file.refcnt);
3222 ppp_unlock(ppp);
3223 ret = 0;
3224
3225 outl:
3226 write_unlock_bh(&pch->upl);
3227 out:
3228 mutex_unlock(&pn->all_ppp_mutex);
3229 return ret;
3230}
3231
3232/*
3233 * Disconnect a channel from its ppp unit.
3234 */
3235static int
3236ppp_disconnect_channel(struct channel *pch)
3237{
3238 struct ppp *ppp;
3239 int err = -EINVAL;
3240
3241 write_lock_bh(&pch->upl);
3242 ppp = pch->ppp;
3243 pch->ppp = NULL;
3244 write_unlock_bh(&pch->upl);
3245 if (ppp) {
3246 /* remove it from the ppp unit's list */
3247 ppp_lock(ppp);
3248 list_del(&pch->clist);
3249 if (--ppp->n_channels == 0)
3250 wake_up_interruptible(&ppp->file.rwait);
3251 ppp_unlock(ppp);
3252 if (refcount_dec_and_test(&ppp->file.refcnt))
3253 ppp_destroy_interface(ppp);
3254 err = 0;
3255 }
3256 return err;
3257}
3258
3259/*
3260 * Free up the resources used by a ppp channel.
3261 */
3262static void ppp_destroy_channel(struct channel *pch)
3263{
3264 put_net(pch->chan_net);
3265 pch->chan_net = NULL;
3266
3267 atomic_dec(&channel_count);
3268
3269 if (!pch->file.dead) {
3270 /* "can't happen" */
3271 pr_err("ppp: destroying undead channel %p !\n", pch);
3272 return;
3273 }
3274 skb_queue_purge(&pch->file.xq);
3275 skb_queue_purge(&pch->file.rq);
3276 kfree(pch);
3277}
3278
3279static void __exit ppp_cleanup(void)
3280{
3281 /* should never happen */
3282 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
3283 pr_err("PPP: removing module but units remain!\n");
3284 rtnl_link_unregister(&ppp_link_ops);
3285 unregister_chrdev(PPP_MAJOR, "ppp");
3286 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
3287 class_destroy(ppp_class);
3288 unregister_pernet_device(&ppp_net_ops);
3289}
3290
3291/*
3292 * Units handling. Caller must protect concurrent access
3293 * by holding all_ppp_mutex
3294 */
3295
3296/* associate pointer with specified number */
3297static int unit_set(struct idr *p, void *ptr, int n)
3298{
3299 int unit;
3300
3301 unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
3302 if (unit == -ENOSPC)
3303 unit = -EINVAL;
3304 return unit;
3305}
3306
3307/* get new free unit number and associate pointer with it */
Olivier Deprez0e641232021-09-23 10:07:05 +02003308static int unit_get(struct idr *p, void *ptr, int min)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003309{
Olivier Deprez0e641232021-09-23 10:07:05 +02003310 return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003311}
3312
3313/* put unit number back to a pool */
3314static void unit_put(struct idr *p, int n)
3315{
3316 idr_remove(p, n);
3317}
3318
3319/* get pointer associated with the number */
3320static void *unit_find(struct idr *p, int n)
3321{
3322 return idr_find(p, n);
3323}
3324
3325/* Module/initialization stuff */
3326
3327module_init(ppp_init);
3328module_exit(ppp_cleanup);
3329
3330EXPORT_SYMBOL(ppp_register_net_channel);
3331EXPORT_SYMBOL(ppp_register_channel);
3332EXPORT_SYMBOL(ppp_unregister_channel);
3333EXPORT_SYMBOL(ppp_channel_index);
3334EXPORT_SYMBOL(ppp_unit_number);
3335EXPORT_SYMBOL(ppp_dev_name);
3336EXPORT_SYMBOL(ppp_input);
3337EXPORT_SYMBOL(ppp_input_error);
3338EXPORT_SYMBOL(ppp_output_wakeup);
3339EXPORT_SYMBOL(ppp_register_compressor);
3340EXPORT_SYMBOL(ppp_unregister_compressor);
3341MODULE_LICENSE("GPL");
3342MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
3343MODULE_ALIAS_RTNL_LINK("ppp");
3344MODULE_ALIAS("devname:ppp");