Linux v4.19.13 snapshot of drivers/net/ppp.
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
new file mode 100644
index 0000000..1373c6d
--- /dev/null
+++ b/drivers/net/ppp/Kconfig
@@ -0,0 +1,179 @@
+#
+# PPP network device configuration
+#
+
+config PPP
+	tristate "PPP (point-to-point protocol) support"
+	select SLHC
+	---help---
+	  PPP (Point to Point Protocol) is a newer and better SLIP.  It serves
+	  the same purpose: sending Internet traffic over telephone (and other
+	  serial) lines.  Ask your access provider if they support it, because
+	  otherwise you can't use it; most Internet access providers these
+	  days support PPP rather than SLIP.
+
+	  To use PPP, you need an additional program called pppd as described
+	  in the PPP-HOWTO, available at
+	  <http://www.tldp.org/docs.html#howto>.  Make sure that you have
+	  the version of pppd recommended in <file:Documentation/Changes>.
+	  The PPP option enlarges your kernel by about 16 KB.
+
+	  There are actually two versions of PPP: the traditional PPP for
+	  asynchronous lines, such as regular analog phone lines, and
+	  synchronous PPP which can be used over digital ISDN lines for
+	  example.  If you want to use PPP over phone lines or other
+	  asynchronous serial lines, you need to say Y (or M) here and also to
+	  the next option, "PPP support for async serial ports".  For PPP over
+	  synchronous lines, you should say Y (or M) here and to "Support
+	  synchronous PPP", below.
+
+	  If you said Y to "Version information on all symbols" above, then
+	  you cannot compile the PPP driver into the kernel; you can then only
+	  compile it as a module. To compile this driver as a module, choose M
+	  here. The module will be called ppp_generic.
+
+if PPP
+
+config PPP_BSDCOMP
+	tristate "PPP BSD-Compress compression"
+	depends on PPP
+	---help---
+	  Support for the BSD-Compress compression method for PPP, which uses
+	  the LZW compression method to compress each PPP packet before it is
+	  sent over the wire. The machine at the other end of the PPP link
+	  (usually your ISP) has to support the BSD-Compress compression
+	  method as well for this to be useful. Even if they don't support it,
+	  it is safe to say Y here.
+
+	  The PPP Deflate compression method ("PPP Deflate compression",
+	  below) is preferable to BSD-Compress, because it compresses better
+	  and is patent-free.
+
+	  To compile this support as a module, choose M here; the module
+	  will be called bsd_comp and will show up in the modules
+	  directory once you have said "make modules". If unsure, say N.
+
+config PPP_DEFLATE
+	tristate "PPP Deflate compression"
+	depends on PPP
+	select ZLIB_INFLATE
+	select ZLIB_DEFLATE
+	---help---
+	  Support for the Deflate compression method for PPP, which uses the
+	  Deflate algorithm (the same algorithm that gzip uses) to compress
+	  each PPP packet before it is sent over the wire.  The machine at the
+	  other end of the PPP link (usually your ISP) has to support the
+	  Deflate compression method as well for this to be useful.  Even if
+	  they don't support it, it is safe to say Y here.
+
+	  To compile this driver as a module, choose M here.
+
+config PPP_FILTER
+	bool "PPP filtering"
+	depends on PPP
+	---help---
+	  Say Y here if you want to be able to filter the packets passing over
+	  PPP interfaces.  This allows you to control which packets count as
+	  activity (i.e. which packets will reset the idle timer or bring up
+	  a demand-dialed link) and which packets are to be dropped entirely.
+	  You need to say Y here if you wish to use the pass-filter and
+	  active-filter options to pppd.
+
+	  If unsure, say N.
+
+config PPP_MPPE
+	tristate "PPP MPPE compression (encryption)"
+	depends on PPP
+	select CRYPTO
+	select CRYPTO_SHA1
+	select CRYPTO_ARC4
+	select CRYPTO_ECB
+	---help---
+	  Support for the MPPE Encryption protocol, as employed by the
+	  Microsoft Point-to-Point Tunneling Protocol.
+
+	  See http://pptpclient.sourceforge.net/ for information on
+	  configuring PPTP clients and servers to utilize this method.
+
+config PPP_MULTILINK
+	bool "PPP multilink support"
+	depends on PPP
+	---help---
+	  PPP multilink is a protocol (defined in RFC 1990) which allows you
+	  to combine several (logical or physical) lines into one logical PPP
+	  connection, so that you can utilize your full bandwidth.
+
+	  This has to be supported at the other end as well and you need a
+	  version of the pppd daemon which understands the multilink protocol.
+
+	  If unsure, say N.
+
+config PPPOATM
+	tristate "PPP over ATM"
+	depends on ATM && PPP
+	---help---
+	  Support PPP (Point to Point Protocol) encapsulated in ATM frames.
+	  This implementation does not yet comply with section 8 of RFC2364,
+	  which can lead to bad results if the ATM peer loses state and
+	  changes its encapsulation unilaterally.
+
+config PPPOE
+	tristate "PPP over Ethernet"
+	depends on PPP
+	---help---
+	  Support for PPP over Ethernet.
+
+	  This driver requires the latest version of pppd from the CVS
+	  repository at cvs.samba.org.  Alternatively, see the
+	  RoaringPenguin package (<http://www.roaringpenguin.com/pppoe>)
+	  which contains instructions on how to use this driver (under
+	  the heading "Kernel mode PPPoE").
+
+config PPTP
+	tristate "PPP over IPv4 (PPTP)"
+	depends on PPP && NET_IPGRE_DEMUX
+	---help---
+	  Support for PPP over IPv4 (Point-to-Point Tunneling Protocol).
+
+	  This driver requires a pppd plugin to work in client mode or a
+	  modified pptpd (poptop) to work in server mode.
+	  See http://accel-pptp.sourceforge.net/ for information on how to
+	  use this module.
+
+config PPPOL2TP
+	tristate "PPP over L2TP"
+	depends on L2TP && PPP
+	---help---
+	  Support for PPP-over-L2TP socket family. L2TP is a protocol
+	  used by ISPs and enterprises to tunnel PPP traffic over UDP
+	  tunnels. L2TP is replacing PPTP for VPN uses.
+
+if TTY
+
+config PPP_ASYNC
+	tristate "PPP support for async serial ports"
+	depends on PPP
+	select CRC_CCITT
+	---help---
+	  Say Y (or M) here if you want to be able to use PPP over standard
+	  asynchronous serial ports, such as COM1 or COM2 on a PC.  If you use
+	  a modem (not a synchronous or ISDN modem) to contact your ISP, you
+	  need this option.
+
+	  To compile this driver as a module, choose M here.
+
+	  If unsure, say Y.
+
+config PPP_SYNC_TTY
+	tristate "PPP support for sync tty ports"
+	depends on PPP
+	---help---
+	  Say Y (or M) here if you want to be able to use PPP over synchronous
+	  (HDLC) tty devices, such as the SyncLink adapter. These devices
+	  are often used for high-speed leased lines like T1/E1.
+
+	  To compile this driver as a module, choose M here.
+
+endif # TTY
+
+endif # PPP
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
new file mode 100644
index 0000000..16c457d
--- /dev/null
+++ b/drivers/net/ppp/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux PPP network device drivers.
+#
+
+obj-$(CONFIG_PPP) += ppp_generic.o
+obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
+obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
+obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
+obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
+obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
+obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o
+obj-$(CONFIG_PPTP) += pppox.o pptp.o
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
new file mode 100644
index 0000000..61fedb2
--- /dev/null
+++ b/drivers/net/ppp/bsd_comp.c
@@ -0,0 +1,1170 @@
+/*
+ * Update: The Berkeley copyright was changed, and the change
+ * is retroactive to all "true" BSD software (i.e. everything
+ * from UCB as opposed to other people's code that just carried
+ * the same license). The new copyright doesn't clash with the
+ * GPL, so the module-only restriction has been removed.
+ */
+
+/* Because this code is derived from the 4.3BSD compress source:
+ *
+ * Copyright (c) 1985, 1986 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * James A. Woods, derived from original work by Spencer Thomas
+ * and Joseph Orost.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This version is for use with contiguous buffers on Linux-derived systems.
+ *
+ *  ==FILEVERSION 20000226==
+ *
+ *  NOTE TO MAINTAINERS:
+ *     If you modify this file at all, please set the number above to the
+ *     date of the modification as YYYYMMDD (year month day).
+ *     bsd_comp.c is shipped with a PPP distribution as well as with
+ *     the kernel; if everyone increases the FILEVERSION number above,
+ *     then scripts can do the right thing when deciding whether to
+ *     install a new bsd_comp.c file. Don't change the format of that
+ *     line otherwise, so the installation script can recognize it.
+ *
+ * From: bsd_comp.c,v 1.3 1994/12/08 01:59:58 paulus Exp
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+
+#include <linux/ppp_defs.h>
+
+#undef   PACKETPTR
+#define  PACKETPTR 1
+#include <linux/ppp-comp.h>
+#undef   PACKETPTR
+
+#include <asm/byteorder.h>
+
+/*
+ * PPP "BSD compress" compression
+ *  The differences between this compression and the classic BSD LZW
+ *  source are obvious from the requirement that the classic code worked
+ *  with files while this handles arbitrarily long streams that
+ *  are broken into packets.  They are:
+ *
+ *	When the code size expands, a block of junk is not emitted by
+ *	    the compressor and not expected by the decompressor.
+ *
+ *	New codes are not necessarily assigned every time an old
+ *	    code is output by the compressor.  This is because a packet
+ *	    end forces a code to be emitted, but does not imply that a
+ *	    new sequence has been seen.
+ *
+ *	The compression ratio is checked at the first end of a packet
+ *	    after the appropriate gap.	Besides simplifying and speeding
+ *	    things up, this makes it more likely that the transmitter
+ *	    and receiver will agree when the dictionary is cleared when
+ *	    compression is not going well.
+ */
+
+/*
+ * Macros to extract protocol version and number of bits
+ * from the third byte of the BSD Compress CCP configuration option.
+ */
+
+#define BSD_VERSION(x)	((x) >> 5)
+#define BSD_NBITS(x)	((x) & 0x1F)
+
+#define BSD_CURRENT_VERSION	1
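The CCP configuration option for BSD-Compress carries both fields in its third byte: the protocol version in the top three bits and the code size in the low five. A minimal stand-alone sketch of that decoding (the option byte 0x2c is a made-up value, not from a real negotiation):

```c
#include <stdio.h>

#define BSD_VERSION(x)	((x) >> 5)
#define BSD_NBITS(x)	((x) & 0x1F)

int main(void)
{
	unsigned char opt = 0x2c;	/* hypothetical: binary 001 01100 */

	/* prints "version=1 nbits=12" */
	printf("version=%d nbits=%d\n", BSD_VERSION(opt), BSD_NBITS(opt));
	return 0;
}
```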
+
+/*
+ * A dictionary for doing BSD compress.
+ */
+
+struct bsd_dict {
+    union {				/* hash value */
+	unsigned long	fcode;
+	struct {
+#if defined(__LITTLE_ENDIAN)		/* Little endian order */
+	    unsigned short	prefix;	/* preceding code */
+	    unsigned char	suffix; /* last character of new code */
+	    unsigned char	pad;
+#elif defined(__BIG_ENDIAN)		/* Big endian order */
+	    unsigned char	pad;
+	    unsigned char	suffix; /* last character of new code */
+	    unsigned short	prefix; /* preceding code */
+#else
+#error Endianness not defined...
+#endif
+	} hs;
+    } f;
+    unsigned short codem1;		/* output of hash table -1 */
+    unsigned short cptr;		/* map code to hash table entry */
+};
+
+struct bsd_db {
+    int	    totlen;			/* length of this structure */
+    unsigned int   hsize;		/* size of the hash table */
+    unsigned char  hshift;		/* used in hash function */
+    unsigned char  n_bits;		/* current bits/code */
+    unsigned char  maxbits;		/* maximum bits/code */
+    unsigned char  debug;		/* non-zero if debug desired */
+    unsigned char  unit;		/* ppp unit number */
+    unsigned short seqno;		/* sequence # of next packet */
+    unsigned int   mru;			/* size of receive (decompress) bufr */
+    unsigned int   maxmaxcode;		/* largest valid code */
+    unsigned int   max_ent;		/* largest code in use */
+    unsigned int   in_count;		/* uncompressed bytes, aged */
+    unsigned int   bytes_out;		/* compressed bytes, aged */
+    unsigned int   ratio;		/* recent compression ratio */
+    unsigned int   checkpoint;		/* when to next check the ratio */
+    unsigned int   clear_count;		/* times dictionary cleared */
+    unsigned int   incomp_count;	/* incompressible packets */
+    unsigned int   incomp_bytes;	/* incompressible bytes */
+    unsigned int   uncomp_count;	/* uncompressed packets */
+    unsigned int   uncomp_bytes;	/* uncompressed bytes */
+    unsigned int   comp_count;		/* compressed packets */
+    unsigned int   comp_bytes;		/* compressed bytes */
+    unsigned short  *lens;		/* array of lengths of codes */
+    struct bsd_dict *dict;		/* dictionary */
+};
+
+#define BSD_OVHD	2		/* BSD compress overhead/packet */
+#define MIN_BSD_BITS	9
+#define BSD_INIT_BITS	MIN_BSD_BITS
+#define MAX_BSD_BITS	15
+
+static void	bsd_free (void *state);
+static void	*bsd_alloc(unsigned char *options, int opt_len, int decomp);
+static void	*bsd_comp_alloc (unsigned char *options, int opt_len);
+static void	*bsd_decomp_alloc (unsigned char *options, int opt_len);
+
+static int	bsd_init        (void *db, unsigned char *options,
+			         int opt_len, int unit, int debug, int decomp);
+static int	bsd_comp_init   (void *state, unsigned char *options,
+			         int opt_len, int unit, int opthdr, int debug);
+static int	bsd_decomp_init (void *state, unsigned char *options,
+				 int opt_len, int unit, int opthdr, int mru,
+				 int debug);
+
+static void	bsd_reset (void *state);
+static void	bsd_comp_stats (void *state, struct compstat *stats);
+
+static int	bsd_compress (void *state, unsigned char *rptr,
+			      unsigned char *obuf, int isize, int osize);
+static void	bsd_incomp (void *state, unsigned char *ibuf, int icnt);
+
+static int	bsd_decompress (void *state, unsigned char *ibuf, int isize,
+				unsigned char *obuf, int osize);
+
+/* These are in ppp_generic.c */
+extern int  ppp_register_compressor   (struct compressor *cp);
+extern void ppp_unregister_compressor (struct compressor *cp);
+
+/*
+ * the next two codes should not be changed lightly, as they must not
+ * lie within the contiguous general code space.
+ */
+#define CLEAR	256			/* table clear output code */
+#define FIRST	257			/* first free entry */
+#define LAST	255
+
+#define MAXCODE(b)	((1 << (b)) - 1)
+#define BADCODEM1	MAXCODE(MAX_BSD_BITS)
+
+#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
+					 ^ (unsigned long)(prefix))
+#define BSD_KEY(prefix,suffix)		((((unsigned long)(suffix)) << 16) \
+					 + (unsigned long)(prefix))
+
+#define CHECK_GAP	10000		/* Ratio check interval */
+
+#define RATIO_SCALE_LOG	8
+#define RATIO_SCALE	(1<<RATIO_SCALE_LOG)
+#define RATIO_MAX	(0x7fffffff>>RATIO_SCALE_LOG)
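BSD_HASH and BSD_KEY drive an open-addressed dictionary: the hash picks the first slot, and on a collision the probe advances by the initial hash value itself (see the `disp` logic in bsd_compress and bsd_decompress below). A small sketch with made-up inputs, using the 12-bit table size from bsd_alloc:

```c
#include <stdio.h>

#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
					 ^ (unsigned long)(prefix))
#define BSD_KEY(prefix,suffix)		((((unsigned long)(suffix)) << 16) \
					 + (unsigned long)(prefix))

int main(void)
{
	unsigned long hsize = 5003, hshift = 4;	/* the 9..12-bit table */
	unsigned int prefix = 0x101, suffix = 'a';

	/* both operands fit in 12 bits, so the XOR stays below hsize */
	unsigned long hval = BSD_HASH(prefix, suffix, hshift);
	unsigned long disp = (hval == 0) ? 1 : hval;
	unsigned long next = (hval + disp >= hsize) ? hval + disp - hsize
						    : hval + disp;

	/* prints "fcode=0x610101 hval=1809 next=3618" */
	printf("fcode=%#lx hval=%lu next=%lu\n",
	       BSD_KEY(prefix, suffix), hval, next);
	return 0;
}
```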
+
+/*
+ * clear the dictionary
+ */
+
+static void
+bsd_clear(struct bsd_db *db)
+{
+    db->clear_count++;
+    db->max_ent      = FIRST-1;
+    db->n_bits       = BSD_INIT_BITS;
+    db->bytes_out    = 0;
+    db->in_count     = 0;
+    db->ratio	     = 0;
+    db->checkpoint   = CHECK_GAP;
+}
+
+/*
+ * If the dictionary is full, then see if it is time to reset it.
+ *
+ * Compute the compression ratio using fixed-point arithmetic
+ * with 8 fractional bits.
+ *
+ * Since we have an infinite stream instead of a single file,
+ * watch only the local compression ratio.
+ *
+ * Since both peers must reset the dictionary at the same time even in
+ * the absence of CLEAR codes (while packets are incompressible), they
+ * must compute the same ratio.
+ */
+
+static int bsd_check (struct bsd_db *db)	/* 1=output CLEAR */
+  {
+    unsigned int new_ratio;
+
+    if (db->in_count >= db->checkpoint)
+      {
+	/* age the ratio by limiting the size of the counts */
+	if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
+	  {
+	    db->in_count  -= (db->in_count  >> 2);
+	    db->bytes_out -= (db->bytes_out >> 2);
+	  }
+
+	db->checkpoint = db->in_count + CHECK_GAP;
+
+	if (db->max_ent >= db->maxmaxcode)
+	  {
+	    /* Reset the dictionary only if the ratio is worse,
+	     * or if it looks as if it has been poisoned
+	     * by incompressible data.
+	     *
+	     * This does not overflow, because
+	     *	db->in_count <= RATIO_MAX.
+	     */
+
+	    new_ratio = db->in_count << RATIO_SCALE_LOG;
+	    if (db->bytes_out != 0)
+	      {
+		new_ratio /= db->bytes_out;
+	      }
+
+	    if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
+	      {
+		bsd_clear (db);
+		return 1;
+	      }
+	    db->ratio = new_ratio;
+	  }
+      }
+    return 0;
+  }
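bsd_check keeps the ratio in 24.8 fixed point, `(in_count << 8) / bytes_out`, so RATIO_SCALE (256) represents 1:1; once the table is full, a clear is forced when the ratio stops improving or drops below 1:1. A worked sketch with hypothetical counters:

```c
#include <stdio.h>

#define RATIO_SCALE_LOG	8
#define RATIO_SCALE	(1 << RATIO_SCALE_LOG)

int main(void)
{
	/* hypothetical counters: 10000 bytes in, 6000 bytes out */
	unsigned int in_count = 10000, bytes_out = 6000;
	unsigned int old_ratio = 450;		/* previous checkpoint */
	unsigned int new_ratio = (in_count << RATIO_SCALE_LOG) / bytes_out;

	/* 10000*256/6000 = 426, i.e. roughly 1.66:1 */
	printf("new_ratio=%u (%.2f:1)\n",
	       new_ratio, new_ratio / (double)RATIO_SCALE);

	/* the clear condition from bsd_check, for a full dictionary */
	if (new_ratio < old_ratio || new_ratio < 1 * RATIO_SCALE)
		printf("would emit CLEAR and reset the dictionary\n");
	return 0;
}
```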
+
+/*
+ * Return statistics.
+ */
+
+static void bsd_comp_stats (void *state, struct compstat *stats)
+  {
+    struct bsd_db *db = (struct bsd_db *) state;
+
+    stats->unc_bytes    = db->uncomp_bytes;
+    stats->unc_packets  = db->uncomp_count;
+    stats->comp_bytes   = db->comp_bytes;
+    stats->comp_packets = db->comp_count;
+    stats->inc_bytes    = db->incomp_bytes;
+    stats->inc_packets  = db->incomp_count;
+    stats->in_count     = db->in_count;
+    stats->bytes_out    = db->bytes_out;
+  }
+
+/*
+ * Reset state, as on a CCP ResetReq.
+ */
+
+static void bsd_reset (void *state)
+  {
+    struct bsd_db *db = (struct bsd_db *) state;
+
+    bsd_clear(db);
+
+    db->seqno       = 0;
+    db->clear_count = 0;
+  }
+
+/*
+ * Release the compression structure
+ */
+
+static void bsd_free (void *state)
+{
+	struct bsd_db *db = state;
+
+	if (!db)
+		return;
+
+/*
+ * Release the dictionary
+ */
+	vfree(db->dict);
+	db->dict = NULL;
+/*
+ * Release the string buffer
+ */
+	vfree(db->lens);
+	db->lens = NULL;
+/*
+ * Finally release the structure itself.
+ */
+	kfree(db);
+}
+
+/*
+ * Allocate space for a (de) compressor.
+ */
+
+static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
+  {
+    int bits;
+    unsigned int hsize, hshift, maxmaxcode;
+    struct bsd_db *db;
+
+    if (opt_len != 3 || options[0] != CI_BSD_COMPRESS || options[1] != 3
+	|| BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
+      {
+	return NULL;
+      }
+
+    bits = BSD_NBITS(options[2]);
+
+    switch (bits)
+      {
+    case 9:			/* needs 82152 for both directions */
+    case 10:			/* needs 84144 */
+    case 11:			/* needs 88240 */
+    case 12:			/* needs 96432 */
+	hsize = 5003;
+	hshift = 4;
+	break;
+    case 13:			/* needs 176784 */
+	hsize = 9001;
+	hshift = 5;
+	break;
+    case 14:			/* needs 353744 */
+	hsize = 18013;
+	hshift = 6;
+	break;
+    case 15:			/* needs 691440 */
+	hsize = 35023;
+	hshift = 7;
+	break;
+    case 16:			/* needs 1366160--far too much, */
+	/* hsize = 69001; */	/* and 69001 is too big for cptr */
+	/* hshift = 8; */	/* in struct bsd_db */
+	/* break; */
+    default:
+	return NULL;
+      }
+/*
+ * Allocate the main control structure for this instance.
+ */
+    maxmaxcode = MAXCODE(bits);
+    db         = kzalloc(sizeof (struct bsd_db),
+					    GFP_KERNEL);
+    if (!db)
+      {
+	return NULL;
+      }
+
+/*
+ * Allocate space for the dictionary. This may be more than one page in
+ * length.
+ */
+    db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
+    if (!db->dict)
+      {
+	bsd_free (db);
+	return NULL;
+      }
+
+/*
+ * If this is the compression buffer then there is no length data.
+ */
+    if (!decomp)
+      {
+	db->lens = NULL;
+      }
+/*
+ * For decompression, the length information is needed as well.
+ */
+    else
+      {
+        db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
+	if (!db->lens)
+	  {
+	    bsd_free (db);
+	    return NULL;
+	  }
+      }
+/*
+ * Initialize the data information for the compression code
+ */
+    db->totlen     = sizeof (struct bsd_db)   +
+      		    (sizeof (struct bsd_dict) * hsize);
+
+    db->hsize      = hsize;
+    db->hshift     = hshift;
+    db->maxmaxcode = maxmaxcode;
+    db->maxbits    = bits;
+
+    return (void *) db;
+  }
+
+static void *bsd_comp_alloc (unsigned char *options, int opt_len)
+  {
+    return bsd_alloc (options, opt_len, 0);
+  }
+
+static void *bsd_decomp_alloc (unsigned char *options, int opt_len)
+  {
+    return bsd_alloc (options, opt_len, 1);
+  }
+
+/*
+ * Initialize the database.
+ */
+
+static int bsd_init (void *state, unsigned char *options,
+		     int opt_len, int unit, int debug, int decomp)
+  {
+    struct bsd_db *db = state;
+    int indx;
+
+    if ((opt_len != 3) || (options[0] != CI_BSD_COMPRESS) || (options[1] != 3)
+	|| (BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
+	|| (BSD_NBITS(options[2]) != db->maxbits)
+	|| (decomp && db->lens == NULL))
+      {
+	return 0;
+      }
+
+    if (decomp)
+      {
+	indx = LAST;
+	do
+	  {
+	    db->lens[indx] = 1;
+	  }
+	while (indx-- > 0);
+      }
+
+    indx = db->hsize;
+    while (indx-- != 0)
+      {
+	db->dict[indx].codem1 = BADCODEM1;
+	db->dict[indx].cptr   = 0;
+      }
+
+    db->unit = unit;
+    db->mru  = 0;
+#ifndef DEBUG
+    if (debug)
+#endif
+      db->debug = 1;
+
+    bsd_reset(db);
+
+    return 1;
+  }
+
+static int bsd_comp_init (void *state, unsigned char *options,
+			  int opt_len, int unit, int opthdr, int debug)
+  {
+    return bsd_init (state, options, opt_len, unit, debug, 0);
+  }
+
+static int bsd_decomp_init (void *state, unsigned char *options,
+			    int opt_len, int unit, int opthdr, int mru,
+			    int debug)
+  {
+    return bsd_init (state, options, opt_len, unit, debug, 1);
+  }
+
+/*
+ * Obtain pointers to the various structures in the compression tables
+ */
+
+#define dict_ptrx(p,idx) &(p->dict[idx])
+#define lens_ptrx(p,idx) &(p->lens[idx])
+
+#ifdef DEBUG
+static unsigned short *lens_ptr(struct bsd_db *db, int idx)
+  {
+    if ((unsigned int) idx > (unsigned int) db->maxmaxcode)
+      {
+	printk ("<9>ppp: lens_ptr(%d) > max\n", idx);
+	idx = 0;
+      }
+    return lens_ptrx (db, idx);
+  }
+
+static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
+  {
+    if ((unsigned int) idx >= (unsigned int) db->hsize)
+      {
+	printk ("<9>ppp: dict_ptr(%d) > max\n", idx);
+	idx = 0;
+      }
+    return dict_ptrx (db, idx);
+  }
+
+#else
+#define lens_ptr(db,idx) lens_ptrx(db,idx)
+#define dict_ptr(db,idx) dict_ptrx(db,idx)
+#endif
+
+/*
+ * compress a packet
+ *
+ *	The result of this function is the size of the compressed
+ *	packet. A zero is returned if the packet was not compressed
+ *	for some reason, such as the size being larger than uncompressed.
+ *
+ *	One change from the BSD compress command is that when the
+ *	code size expands, we do not output a bunch of padding.
+ */
+
+static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf,
+			 int isize, int osize)
+  {
+    struct bsd_db *db;
+    int hshift;
+    unsigned int max_ent;
+    unsigned int n_bits;
+    unsigned int bitno;
+    unsigned long accm;
+    int ent;
+    unsigned long fcode;
+    struct bsd_dict *dictp;
+    unsigned char c;
+    int hval;
+    int disp;
+    int ilen;
+    int mxcode;
+    unsigned char *wptr;
+    int olen;
+
+#define PUTBYTE(v)			\
+  {					\
+    ++olen;				\
+    if (wptr)				\
+      {					\
+	*wptr++ = (unsigned char) (v);	\
+	if (olen >= osize)		\
+	  {				\
+	    wptr = NULL;		\
+	  }				\
+      }					\
+  }
+
+#define OUTPUT(ent)			\
+  {					\
+    bitno -= n_bits;			\
+    accm |= ((ent) << bitno);		\
+    do					\
+      {					\
+	PUTBYTE(accm >> 24);		\
+	accm <<= 8;			\
+	bitno += 8;			\
+      }					\
+    while (bitno <= 24);		\
+  }
+
+  /*
+   * If the protocol is not in the range we're interested in,
+   * just return without compressing the packet.  If it is,
+   * the protocol becomes the first byte to compress.
+   */
+
+    ent = PPP_PROTOCOL(rptr);
+    if (ent < 0x21 || ent > 0xf9)
+      {
+	return 0;
+      }
+
+    db      = (struct bsd_db *) state;
+    hshift  = db->hshift;
+    max_ent = db->max_ent;
+    n_bits  = db->n_bits;
+    bitno   = 32;
+    accm    = 0;
+    mxcode  = MAXCODE (n_bits);
+
+    /* Initialize the output pointers */
+    wptr  = obuf;
+    olen  = PPP_HDRLEN + BSD_OVHD;
+
+    if (osize > isize)
+      {
+	osize = isize;
+      }
+
+    /* This is the PPP header information */
+    if (wptr)
+      {
+	*wptr++ = PPP_ADDRESS(rptr);
+	*wptr++ = PPP_CONTROL(rptr);
+	*wptr++ = 0;
+	*wptr++ = PPP_COMP;
+	*wptr++ = db->seqno >> 8;
+	*wptr++ = db->seqno;
+      }
+
+    /* Skip the input header */
+    rptr  += PPP_HDRLEN;
+    isize -= PPP_HDRLEN;
+    ilen   = ++isize;	/* Low byte of protocol is counted as input */
+
+    while (--ilen > 0)
+      {
+	c     = *rptr++;
+	fcode = BSD_KEY  (ent, c);
+	hval  = BSD_HASH (ent, c, hshift);
+	dictp = dict_ptr (db, hval);
+
+	/* Validate and then check the entry. */
+	if (dictp->codem1 >= max_ent)
+	  {
+	    goto nomatch;
+	  }
+
+	if (dictp->f.fcode == fcode)
+	  {
+	    ent = dictp->codem1 + 1;
+	    continue;	/* found (prefix,suffix) */
+	  }
+
+	/* continue probing until a match or invalid entry */
+	disp = (hval == 0) ? 1 : hval;
+
+	do
+	  {
+	    hval += disp;
+	    if (hval >= db->hsize)
+	      {
+		hval -= db->hsize;
+	      }
+	    dictp = dict_ptr (db, hval);
+	    if (dictp->codem1 >= max_ent)
+	      {
+		goto nomatch;
+	      }
+	  }
+	while (dictp->f.fcode != fcode);
+
+	ent = dictp->codem1 + 1;	/* finally found (prefix,suffix) */
+	continue;
+
+nomatch:
+	OUTPUT(ent);		/* output the prefix */
+
+	/* code -> hashtable */
+	if (max_ent < db->maxmaxcode)
+	  {
+	    struct bsd_dict *dictp2;
+	    struct bsd_dict *dictp3;
+	    int    indx;
+
+	    /* expand code size if needed */
+	    if (max_ent >= mxcode)
+	      {
+		db->n_bits = ++n_bits;
+		mxcode     = MAXCODE (n_bits);
+	      }
+
+	    /* Invalidate old hash table entry using
+	     * this code, and then take it over.
+	     */
+
+	    dictp2 = dict_ptr (db, max_ent + 1);
+	    indx   = dictp2->cptr;
+	    dictp3 = dict_ptr (db, indx);
+
+	    if (dictp3->codem1 == max_ent)
+	      {
+		dictp3->codem1 = BADCODEM1;
+	      }
+
+	    dictp2->cptr   = hval;
+	    dictp->codem1  = max_ent;
+	    dictp->f.fcode = fcode;
+	    db->max_ent    = ++max_ent;
+
+	    if (db->lens)
+	      {
+		unsigned short *len1 = lens_ptr (db, max_ent);
+		unsigned short *len2 = lens_ptr (db, ent);
+		*len1 = *len2 + 1;
+	      }
+	  }
+	ent = c;
+      }
+
+    OUTPUT(ent);		/* output the last code */
+
+    db->bytes_out    += olen - PPP_HDRLEN - BSD_OVHD;
+    db->uncomp_bytes += isize;
+    db->in_count     += isize;
+    ++db->uncomp_count;
+    ++db->seqno;
+
+    if (bitno < 32)
+      {
+	++db->bytes_out; /* must be set before calling bsd_check */
+      }
+
+    /*
+     * Generate the clear command if needed
+     */
+
+    if (bsd_check(db))
+      {
+	OUTPUT (CLEAR);
+      }
+
+    /*
+     * Pad dribble bits of last code with ones.
+     * Do not emit a completely useless byte of ones.
+     */
+
+    if (bitno != 32)
+      {
+	PUTBYTE((accm | (0xff << (bitno-8))) >> 24);
+      }
+
+    /*
+     * Increase code size if we would have without the packet
+     * boundary because the decompressor will do so.
+     */
+
+    if (max_ent >= mxcode && max_ent < db->maxmaxcode)
+      {
+	db->n_bits++;
+      }
+
+    /* If output length is too large then this is an incomplete frame. */
+    if (wptr == NULL)
+      {
+	++db->incomp_count;
+	db->incomp_bytes += isize;
+	olen              = 0;
+      }
+    else /* Count the number of compressed frames */
+      {
+	++db->comp_count;
+	db->comp_bytes += olen;
+      }
+
+    /* Return the resulting output length */
+    return olen;
+#undef OUTPUT
+#undef PUTBYTE
+  }
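OUTPUT above packs each n_bits-wide code MSB-first into a 32-bit window and flushes a byte whenever eight or more bits are complete; the dribble bits of the final code are padded with ones, as done near the end of bsd_compress. A stand-alone sketch of just that packing, with three hypothetical 9-bit codes (the `& 0xff` plays the role of PUTBYTE's unsigned-char cast):

```c
#include <stdio.h>

static unsigned long accm;		/* bit accumulator */
static unsigned int bitno = 32;		/* free bits left in the window */

static void output(unsigned int code, unsigned int n_bits)
{
	bitno -= n_bits;
	accm |= (unsigned long)code << bitno;
	while (bitno <= 24) {		/* flush each completed byte */
		printf("%02lx ", (accm >> 24) & 0xff);
		accm <<= 8;
		bitno += 8;
	}
}

int main(void)
{
	output(0x61, 9);
	output(0x62, 9);
	output(0x101, 9);
	if (bitno != 32)		/* pad the dribble bits with ones */
		printf("%02lx", ((accm | (0xffUL << (bitno - 8))) >> 24) & 0xff);
	printf("\n");			/* prints "30 98 a0 3f" */
	return 0;
}
```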
+
+/*
+ * Update the "BSD Compress" dictionary on the receiver for
+ * incompressible data by pretending to compress the incoming data.
+ */
+
+static void bsd_incomp (void *state, unsigned char *ibuf, int icnt)
+  {
+    (void) bsd_compress (state, ibuf, (char *) 0, icnt, 0);
+  }
+
+/*
+ * Decompress "BSD Compress".
+ *
+ * Because of patent problems, we return DECOMP_ERROR for errors
+ * found by inspecting the input data and for system problems, but
+ * DECOMP_FATALERROR for any errors which could possibly be said to
+ * be being detected "after" decompression.  For DECOMP_ERROR,
+ * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
+ * infringing a patent of Motorola's if we do, so we take CCP down
+ * instead.
+ *
+ * Given that the frame has the correct sequence number and a good FCS,
+ * errors such as invalid codes in the input most likely indicate a
+ * bug, so we return DECOMP_FATALERROR for them in order to turn off
+ * compression, even though they are detected by inspecting the input.
+ */
+
+static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
+			   unsigned char *obuf, int osize)
+  {
+    struct bsd_db *db;
+    unsigned int max_ent;
+    unsigned long accm;
+    unsigned int bitno;		/* 1st valid bit in accm */
+    unsigned int n_bits;
+    unsigned int tgtbitno;	/* bitno when we have a code */
+    struct bsd_dict *dictp;
+    int explen;
+    int seq;
+    unsigned int incode;
+    unsigned int oldcode;
+    unsigned int finchar;
+    unsigned char *p;
+    unsigned char *wptr;
+    int adrs;
+    int ctrl;
+    int ilen;
+    int codelen;
+    int extra;
+
+    db       = (struct bsd_db *) state;
+    max_ent  = db->max_ent;
+    accm     = 0;
+    bitno    = 32;		/* 1st valid bit in accm */
+    n_bits   = db->n_bits;
+    tgtbitno = 32 - n_bits;	/* bitno when we have a code */
+
+    /*
+     * Save the address/control from the PPP header
+     * and then get the sequence number.
+     */
+
+    adrs  = PPP_ADDRESS (ibuf);
+    ctrl  = PPP_CONTROL (ibuf);
+
+    seq   = (ibuf[4] << 8) + ibuf[5];
+
+    ibuf += (PPP_HDRLEN + 2);
+    ilen  = isize - (PPP_HDRLEN + 2);
+
+    /*
+     * Check the sequence number and give up if it differs from
+     * the value we're expecting.
+     */
+
+    if (seq != db->seqno)
+      {
+	if (db->debug)
+	  {
+	    printk("bsd_decomp%d: bad sequence # %d, expected %d\n",
+		   db->unit, seq, db->seqno);
+	  }
+	return DECOMP_ERROR;
+      }
+
+    ++db->seqno;
+    db->bytes_out += ilen;
+
+    /*
+     * Fill in the ppp header, but not the last byte of the protocol
+     * (that comes from the decompressed data).
+     */
+
+    wptr    = obuf;
+    *wptr++ = adrs;
+    *wptr++ = ctrl;
+    *wptr++ = 0;
+
+    oldcode = CLEAR;
+    explen  = 3;
+
+    /*
+     * Keep the checkpoint correctly so that incompressible packets
+     * clear the dictionary at the proper times.
+     */
+
+    for (;;)
+      {
+	if (ilen-- <= 0)
+	  {
+	    db->in_count += (explen - 3); /* don't count the header */
+	    break;
+	  }
+
+	/*
+	 * Accumulate bytes until we have a complete code.
+	 * Then get the next code, relying on the 32-bit,
+	 * unsigned accm to mask the result.
+	 */
+
+	bitno -= 8;
+	accm  |= *ibuf++ << bitno;
+	if (tgtbitno < bitno)
+	  {
+	    continue;
+	  }
+
+	incode = accm >> tgtbitno;
+	accm <<= n_bits;
+	bitno += n_bits;
+
+	/*
+	 * The dictionary must only be cleared at the end of a packet.
+	 */
+
+	if (incode == CLEAR)
+	  {
+	    if (ilen > 0)
+	      {
+		if (db->debug)
+		  {
+		    printk("bsd_decomp%d: bad CLEAR\n", db->unit);
+		  }
+		return DECOMP_FATALERROR;	/* probably a bug */
+	      }
+
+	    bsd_clear(db);
+	    break;
+	  }
+
+	if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
+	    || (incode > max_ent && oldcode == CLEAR))
+	  {
+	    if (db->debug)
+	      {
+		printk("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
+		       db->unit, incode, oldcode);
+		printk("max_ent=0x%x explen=%d seqno=%d\n",
+		       max_ent, explen, db->seqno);
+	      }
+	    return DECOMP_FATALERROR;	/* probably a bug */
+	  }
+
+	/* Special case for KwKwK string. */
+	if (incode > max_ent)
+	  {
+	    finchar = oldcode;
+	    extra   = 1;
+	  }
+	else
+	  {
+	    finchar = incode;
+	    extra   = 0;
+	  }
+
+	codelen = *(lens_ptr (db, finchar));
+	explen += codelen + extra;
+	if (explen > osize)
+	  {
+	    if (db->debug)
+	      {
+		printk("bsd_decomp%d: ran out of mru\n", db->unit);
+#ifdef DEBUG
+		printk("  len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
+		       ilen, finchar, codelen, explen);
+#endif
+	      }
+	    return DECOMP_FATALERROR;
+	  }
+
+	/*
+	 * Decode this code and install it in the decompressed buffer.
+	 */
+
+	wptr += codelen;
+	p     = wptr;
+	while (finchar > LAST)
+	  {
+	    struct bsd_dict *dictp2 = dict_ptr (db, finchar);
+
+	    dictp = dict_ptr (db, dictp2->cptr);
+#ifdef DEBUG
+	    if (--codelen <= 0 || dictp->codem1 != finchar-1)
+	      {
+		if (codelen <= 0)
+		  {
+		    printk("bsd_decomp%d: fell off end of chain ", db->unit);
+		    printk("0x%x at 0x%x by 0x%x, max_ent=0x%x\n",
+			   incode, finchar, dictp2->cptr, max_ent);
+		  }
+		else
+		  {
+		    if (dictp->codem1 != finchar-1)
+		      {
+			printk("bsd_decomp%d: bad code chain 0x%x "
+			       "finchar=0x%x ",
+			       db->unit, incode, finchar);
+
+			printk("oldcode=0x%x cptr=0x%x codem1=0x%x\n",
+			       oldcode, dictp2->cptr, dictp->codem1);
+		      }
+		  }
+		return DECOMP_FATALERROR;
+	      }
+#endif
+	    *--p    = dictp->f.hs.suffix;
+	    finchar = dictp->f.hs.prefix;
+	  }
+	*--p = finchar;
+
+#ifdef DEBUG
+	if (--codelen != 0)
+	  {
+	    printk("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
+		   db->unit, codelen, incode, max_ent);
+	  }
+#endif
+
+	if (extra)		/* the KwKwK case again */
+	  {
+	    *wptr++ = finchar;
+	  }
+
+	/*
+	 * If not first code in a packet, and
+	 * if not out of code space, then allocate a new code.
+	 *
+	 * Keep the hash table correct so it can be used
+	 * with uncompressed packets.
+	 */
+
+	if (oldcode != CLEAR && max_ent < db->maxmaxcode)
+	  {
+	    struct bsd_dict *dictp2, *dictp3;
+	    unsigned short  *lens1,  *lens2;
+	    unsigned long fcode;
+	    int hval, disp, indx;
+
+	    fcode = BSD_KEY(oldcode,finchar);
+	    hval  = BSD_HASH(oldcode,finchar,db->hshift);
+	    dictp = dict_ptr (db, hval);
+
+	    /* look for a free hash table entry */
+	    if (dictp->codem1 < max_ent)
+	      {
+		disp = (hval == 0) ? 1 : hval;
+		do
+		  {
+		    hval += disp;
+		    if (hval >= db->hsize)
+		      {
+			hval -= db->hsize;
+		      }
+		    dictp = dict_ptr (db, hval);
+		  }
+		while (dictp->codem1 < max_ent);
+	      }
+
+	    /*
+	     * Invalidate previous hash table entry
+	     * assigned this code, and then take it over
+	     */
+
+	    dictp2 = dict_ptr (db, max_ent + 1);
+	    indx   = dictp2->cptr;
+	    dictp3 = dict_ptr (db, indx);
+
+	    if (dictp3->codem1 == max_ent)
+	      {
+		dictp3->codem1 = BADCODEM1;
+	      }
+
+	    dictp2->cptr   = hval;
+	    dictp->codem1  = max_ent;
+	    dictp->f.fcode = fcode;
+	    db->max_ent    = ++max_ent;
+
+	    /* Update the length of this string. */
+	    lens1  = lens_ptr (db, max_ent);
+	    lens2  = lens_ptr (db, oldcode);
+	    *lens1 = *lens2 + 1;
+
+	    /* Expand code size if needed. */
+	    if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
+	      {
+		db->n_bits = ++n_bits;
+		tgtbitno   = 32-n_bits;
+	      }
+	  }
+	oldcode = incode;
+      }
+
+    ++db->comp_count;
+    ++db->uncomp_count;
+    db->comp_bytes   += isize - BSD_OVHD - PPP_HDRLEN;
+    db->uncomp_bytes += explen;
+
+    if (bsd_check(db))
+      {
+	if (db->debug)
+	  {
+	    printk("bsd_decomp%d: peer should have cleared dictionary on %d\n",
+		   db->unit, db->seqno - 1);
+	  }
+      }
+    return explen;
+  }
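The KwKwK special case (incode == max_ent + 1) is the one code a correct compressor can emit before the decompressor has installed it; the decoded string is then the previous string plus its own first character. A toy decoder illustrating this, using a table of whole strings rather than the driver's packed (prefix, suffix) chains; the code stream is what LZW produces for "abababab":

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char table[300][16];		/* toy dictionary: whole strings */
	int max_ent = 255;		/* literals 0..255 are implicit */
	const int codes[] = { 'a', 'b', 256, 258, 'b' };
	char prev[16] = "", cur[16], out[64] = "";

	for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
		int c = codes[i];

		if (c <= 255)
			sprintf(cur, "%c", c);
		else if (c <= max_ent)
			strcpy(cur, table[c]);
		else			/* KwKwK: code == max_ent + 1 */
			sprintf(cur, "%s%c", prev, prev[0]);
		if (prev[0])		/* new entry = prev + first of cur */
			sprintf(table[++max_ent], "%s%c", prev, cur[0]);
		strcat(out, cur);
		strcpy(prev, cur);
	}
	printf("%s\n", out);		/* prints "abababab" */
	return 0;
}
```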
+
+/*************************************************************
+ * Table of addresses for the BSD compression module
+ *************************************************************/
+
+static struct compressor ppp_bsd_compress = {
+	.compress_proto =	CI_BSD_COMPRESS,
+	.comp_alloc =		bsd_comp_alloc,
+	.comp_free =		bsd_free,
+	.comp_init =		bsd_comp_init,
+	.comp_reset =		bsd_reset,
+	.compress =		bsd_compress,
+	.comp_stat =		bsd_comp_stats,
+	.decomp_alloc =		bsd_decomp_alloc,
+	.decomp_free =		bsd_free,
+	.decomp_init =		bsd_decomp_init,
+	.decomp_reset =		bsd_reset,
+	.decompress =		bsd_decompress,
+	.incomp =		bsd_incomp,
+	.decomp_stat =		bsd_comp_stats,
+	.owner =		THIS_MODULE
+};
+
+/*************************************************************
+ * Module support routines
+ *************************************************************/
+
+static int __init bsdcomp_init(void)
+{
+	int answer = ppp_register_compressor(&ppp_bsd_compress);
+	if (answer == 0)
+		printk(KERN_INFO "PPP BSD Compression module registered\n");
+	return answer;
+}
+
+static void __exit bsdcomp_cleanup(void)
+{
+	ppp_unregister_compressor(&ppp_bsd_compress);
+}
+
+module_init(bsdcomp_init);
+module_exit(bsdcomp_cleanup);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
new file mode 100644
index 0000000..bdc4d23
--- /dev/null
+++ b/drivers/net/ppp/ppp_async.c
@@ -0,0 +1,1031 @@
+/*
+ * PPP async serial channel driver for Linux.
+ *
+ * Copyright 1999 Paul Mackerras.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * This driver provides the encapsulation and framing for sending
+ * and receiving PPP frames over async serial lines.  It relies on
+ * the generic PPP layer to give it frames to send and to process
+ * received frames.  It implements the PPP line discipline.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/crc-ccitt.h>
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/ppp_channel.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+#include <asm/string.h>
+
+#define PPP_VERSION	"2.4.2"
+
+#define OBUFSIZE	4096
+
+/* Structure for storing local state. */
+struct asyncppp {
+	struct tty_struct *tty;
+	unsigned int	flags;
+	unsigned int	state;
+	unsigned int	rbits;
+	int		mru;
+	spinlock_t	xmit_lock;
+	spinlock_t	recv_lock;
+	unsigned long	xmit_flags;
+	u32		xaccm[8];
+	u32		raccm;
+	unsigned int	bytes_sent;
+	unsigned int	bytes_rcvd;
+
+	struct sk_buff	*tpkt;
+	int		tpkt_pos;
+	u16		tfcs;
+	unsigned char	*optr;
+	unsigned char	*olim;
+	unsigned long	last_xmit;
+
+	struct sk_buff	*rpkt;
+	int		lcp_fcs;
+	struct sk_buff_head rqueue;
+
+	struct tasklet_struct tsk;
+
+	refcount_t	refcnt;
+	struct semaphore dead_sem;
+	struct ppp_channel chan;	/* interface to generic ppp layer */
+	unsigned char	obuf[OBUFSIZE];
+};
+
+/* Bit numbers in xmit_flags */
+#define XMIT_WAKEUP	0
+#define XMIT_FULL	1
+#define XMIT_BUSY	2
+
+/* State bits */
+#define SC_TOSS		1
+#define SC_ESCAPE	2
+#define SC_PREV_ERROR	4
+
+/* Bits in rbits */
+#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+static int flag_time = HZ;
+module_param(flag_time, int, 0);
+MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_PPP);
+
+/*
+ * Prototypes.
+ */
+static int ppp_async_encode(struct asyncppp *ap);
+static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
+static int ppp_async_push(struct asyncppp *ap);
+static void ppp_async_flush_output(struct asyncppp *ap);
+static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
+			    char *flags, int count);
+static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
+			   unsigned long arg);
+static void ppp_async_process(unsigned long arg);
+
+static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
+			   int len, int inbound);
+
+static const struct ppp_channel_ops async_ops = {
+	.start_xmit = ppp_async_send,
+	.ioctl      = ppp_async_ioctl,
+};
+
+/*
+ * Routines implementing the PPP line discipline.
+ */
+
+/*
+ * We have a potential race on dereferencing tty->disc_data,
+ * because the tty layer provides no locking at all - thus one
+ * cpu could be running ppp_asynctty_receive while another
+ * calls ppp_asynctty_close, which zeroes tty->disc_data and
+ * frees the memory that ppp_asynctty_receive is using.  The best
+ * way to fix this is to use a rwlock in the tty struct, but for now
+ * we use a single global rwlock for all ttys in ppp line discipline.
+ *
+ * FIXME: this is no longer true. The _close path for the ldisc is
+ * now guaranteed to be sane.
+ */
+static DEFINE_RWLOCK(disc_data_lock);
+
+static struct asyncppp *ap_get(struct tty_struct *tty)
+{
+	struct asyncppp *ap;
+
+	read_lock(&disc_data_lock);
+	ap = tty->disc_data;
+	if (ap != NULL)
+		refcount_inc(&ap->refcnt);
+	read_unlock(&disc_data_lock);
+	return ap;
+}
+
+static void ap_put(struct asyncppp *ap)
+{
+	if (refcount_dec_and_test(&ap->refcnt))
+		up(&ap->dead_sem);
+}
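ap_get/ap_put implement a small reclaim protocol: readers take a reference under disc_data_lock, and the close path drops the initial reference and, if users remain, sleeps on dead_sem until the last ap_put posts it. A user-space sketch of the same shape, with C11 atomics and a POSIX semaphore standing in for refcount_t and struct semaphore (compile with -pthread):

```c
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt = 1;		/* the object's "alive" reference */
static sem_t dead_sem;

static void put(void)
{
	/* the last user out wakes whoever is waiting to free the object */
	if (atomic_fetch_sub(&refcnt, 1) == 1)
		sem_post(&dead_sem);
}

int main(void)
{
	sem_init(&dead_sem, 0, 0);

	atomic_fetch_add(&refcnt, 1);	/* a reader: ap_get() */
	put();				/* ... and its ap_put() */

	/* close path: drop the initial ref, wait out any remaining users */
	if (atomic_fetch_sub(&refcnt, 1) != 1)
		sem_wait(&dead_sem);
	printf("safe to free\n");
	return 0;
}
```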
+
+/*
+ * Called when a tty is put into PPP line discipline. Called in process
+ * context.
+ */
+static int
+ppp_asynctty_open(struct tty_struct *tty)
+{
+	struct asyncppp *ap;
+	int err;
+	int speed;
+
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+
+	err = -ENOMEM;
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+	if (!ap)
+		goto out;
+
+	/* initialize the asyncppp structure */
+	ap->tty = tty;
+	ap->mru = PPP_MRU;
+	spin_lock_init(&ap->xmit_lock);
+	spin_lock_init(&ap->recv_lock);
+	ap->xaccm[0] = ~0U;
+	ap->xaccm[3] = 0x60000000U;
+	ap->raccm = ~0U;
+	ap->optr = ap->obuf;
+	ap->olim = ap->obuf;
+	ap->lcp_fcs = -1;
+
+	skb_queue_head_init(&ap->rqueue);
+	tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
+
+	refcount_set(&ap->refcnt, 1);
+	sema_init(&ap->dead_sem, 0);
+
+	ap->chan.private = ap;
+	ap->chan.ops = &async_ops;
+	ap->chan.mtu = PPP_MRU;
+	speed = tty_get_baud_rate(tty);
+	ap->chan.speed = speed;
+	err = ppp_register_channel(&ap->chan);
+	if (err)
+		goto out_free;
+
+	tty->disc_data = ap;
+	tty->receive_room = 65536;
+	return 0;
+
+ out_free:
+	kfree(ap);
+ out:
+	return err;
+}
+
+/*
+ * Called when the tty is put into another line discipline
+ * or it hangs up.  We have to wait for any cpu currently
+ * executing in any of the other ppp_asynctty_* routines to
+ * finish before we can call ppp_unregister_channel and free
+ * the asyncppp struct.  This routine must be called from
+ * process context, not interrupt or softirq context.
+ */
+static void
+ppp_asynctty_close(struct tty_struct *tty)
+{
+	struct asyncppp *ap;
+
+	write_lock_irq(&disc_data_lock);
+	ap = tty->disc_data;
+	tty->disc_data = NULL;
+	write_unlock_irq(&disc_data_lock);
+	if (!ap)
+		return;
+
+	/*
+	 * We have now ensured that nobody can start using ap from now
+	 * on, but we have to wait for all existing users to finish.
+	 * Note that ppp_unregister_channel ensures that no calls to
+	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
+	 * by the time it returns.
+	 */
+	if (!refcount_dec_and_test(&ap->refcnt))
+		down(&ap->dead_sem);
+	tasklet_kill(&ap->tsk);
+
+	ppp_unregister_channel(&ap->chan);
+	kfree_skb(ap->rpkt);
+	skb_queue_purge(&ap->rqueue);
+	kfree_skb(ap->tpkt);
+	kfree(ap);
+}
+
+/*
+ * Called on tty hangup in process context.
+ *
+ * Wait for I/O to driver to complete and unregister PPP channel.
+ * This is already done by the close routine, so just call that.
+ */
+static int ppp_asynctty_hangup(struct tty_struct *tty)
+{
+	ppp_asynctty_close(tty);
+	return 0;
+}
+
+/*
+ * Read does nothing - no data is ever available this way.
+ * Pppd reads and writes packets via /dev/ppp instead.
+ */
+static ssize_t
+ppp_asynctty_read(struct tty_struct *tty, struct file *file,
+		  unsigned char __user *buf, size_t count)
+{
+	return -EAGAIN;
+}
+
+/*
+ * Write on the tty does nothing, the packets all come in
+ * from the ppp generic stuff.
+ */
+static ssize_t
+ppp_asynctty_write(struct tty_struct *tty, struct file *file,
+		   const unsigned char *buf, size_t count)
+{
+	return -EAGAIN;
+}
+
+/*
+ * Called in process context only. May be re-entered by multiple
+ * ioctl calling threads.
+ */
+
+static int
+ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
+		   unsigned int cmd, unsigned long arg)
+{
+	struct asyncppp *ap = ap_get(tty);
+	int err, val;
+	int __user *p = (int __user *)arg;
+
+	if (!ap)
+		return -ENXIO;
+	err = -EFAULT;
+	switch (cmd) {
+	case PPPIOCGCHAN:
+		err = -EFAULT;
+		if (put_user(ppp_channel_index(&ap->chan), p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGUNIT:
+		err = -EFAULT;
+		if (put_user(ppp_unit_number(&ap->chan), p))
+			break;
+		err = 0;
+		break;
+
+	case TCFLSH:
+		/* flush our buffers and the serial port's buffer */
+		if (arg == TCIOFLUSH || arg == TCOFLUSH)
+			ppp_async_flush_output(ap);
+		err = n_tty_ioctl_helper(tty, file, cmd, arg);
+		break;
+
+	case FIONREAD:
+		val = 0;
+		if (put_user(val, p))
+			break;
+		err = 0;
+		break;
+
+	default:
+		/* Try the various mode ioctls */
+		err = tty_mode_ioctl(tty, file, cmd, arg);
+	}
+
+	ap_put(ap);
+	return err;
+}
+
+/* No kernel lock - fine */
+static __poll_t
+ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
+{
+	return 0;
+}
+
+/* May sleep, don't call from interrupt level or with interrupts disabled */
+static void
+ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
+		  char *cflags, int count)
+{
+	struct asyncppp *ap = ap_get(tty);
+	unsigned long flags;
+
+	if (!ap)
+		return;
+	spin_lock_irqsave(&ap->recv_lock, flags);
+	ppp_async_input(ap, buf, cflags, count);
+	spin_unlock_irqrestore(&ap->recv_lock, flags);
+	if (!skb_queue_empty(&ap->rqueue))
+		tasklet_schedule(&ap->tsk);
+	ap_put(ap);
+	tty_unthrottle(tty);
+}
+
+static void
+ppp_asynctty_wakeup(struct tty_struct *tty)
+{
+	struct asyncppp *ap = ap_get(tty);
+
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	if (!ap)
+		return;
+	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
+	tasklet_schedule(&ap->tsk);
+	ap_put(ap);
+}
+
+
+static struct tty_ldisc_ops ppp_ldisc = {
+	.owner  = THIS_MODULE,
+	.magic	= TTY_LDISC_MAGIC,
+	.name	= "ppp",
+	.open	= ppp_asynctty_open,
+	.close	= ppp_asynctty_close,
+	.hangup	= ppp_asynctty_hangup,
+	.read	= ppp_asynctty_read,
+	.write	= ppp_asynctty_write,
+	.ioctl	= ppp_asynctty_ioctl,
+	.poll	= ppp_asynctty_poll,
+	.receive_buf = ppp_asynctty_receive,
+	.write_wakeup = ppp_asynctty_wakeup,
+};
+
+static int __init
+ppp_async_init(void)
+{
+	int err;
+
+	err = tty_register_ldisc(N_PPP, &ppp_ldisc);
+	if (err != 0)
+		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
+		       err);
+	return err;
+}
+
+/*
+ * The following routines provide the PPP channel interface.
+ */
+static int
+ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+{
+	struct asyncppp *ap = chan->private;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int err, val;
+	u32 accm[8];
+
+	err = -EFAULT;
+	switch (cmd) {
+	case PPPIOCGFLAGS:
+		val = ap->flags | ap->rbits;
+		if (put_user(val, p))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSFLAGS:
+		if (get_user(val, p))
+			break;
+		ap->flags = val & ~SC_RCV_BITS;
+		spin_lock_irq(&ap->recv_lock);
+		ap->rbits = val & SC_RCV_BITS;
+		spin_unlock_irq(&ap->recv_lock);
+		err = 0;
+		break;
+
+	case PPPIOCGASYNCMAP:
+		if (put_user(ap->xaccm[0], (u32 __user *)argp))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSASYNCMAP:
+		if (get_user(ap->xaccm[0], (u32 __user *)argp))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGRASYNCMAP:
+		if (put_user(ap->raccm, (u32 __user *)argp))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSRASYNCMAP:
+		if (get_user(ap->raccm, (u32 __user *)argp))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGXASYNCMAP:
+		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSXASYNCMAP:
+		if (copy_from_user(accm, argp, sizeof(accm)))
+			break;
+		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
+		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
+		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
+		err = 0;
+		break;
+
+	case PPPIOCGMRU:
+		if (put_user(ap->mru, p))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSMRU:
+		if (get_user(val, p))
+			break;
+		if (val < PPP_MRU)
+			val = PPP_MRU;
+		ap->mru = val;
+		err = 0;
+		break;
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+/*
+ * This is called at softirq level to deliver received packets
+ * to the ppp_generic code, and to tell the ppp_generic code
+ * if we can accept more output now.
+ */
+static void ppp_async_process(unsigned long arg)
+{
+	struct asyncppp *ap = (struct asyncppp *) arg;
+	struct sk_buff *skb;
+
+	/* process received packets */
+	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
+		if (skb->cb[0])
+			ppp_input_error(&ap->chan, 0);
+		ppp_input(&ap->chan, skb);
+	}
+
+	/* try to push more stuff out */
+	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
+		ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Procedures for encapsulation and framing.
+ */
+
+/*
+ * Procedure to encode the data for async serial transmission.
+ * Does octet stuffing (escaping), puts the address/control bytes
+ * on if A/C compression is disabled, and does protocol compression.
+ * Assumes ap->tpkt != 0 on entry.
+ * Returns 1 if we finished the current frame, 0 otherwise.
+ */
+
+#define PUT_BYTE(ap, buf, c, islcp)	do {		\
+	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
+		*buf++ = PPP_ESCAPE;			\
+		*buf++ = c ^ PPP_TRANS;			\
+	} else						\
+		*buf++ = c;				\
+} while (0)
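PUT_BYTE performs the HDLC-style octet stuffing: a byte that matches the asyncmap (or any byte below 0x20 in an LCP frame) goes out as PPP_ESCAPE followed by the byte XORed with 0x20. A stand-alone sketch that hard-codes the policy of escaping all control characters plus the two framing bytes (the real code consults ap->xaccm):

```c
#include <stdio.h>

#define PPP_FLAG	0x7e
#define PPP_ESCAPE	0x7d
#define PPP_TRANS	0x20

/* escape c if it is a control char or one of the framing bytes */
static int stuff(unsigned char c, unsigned char *out)
{
	if (c < 0x20 || c == PPP_FLAG || c == PPP_ESCAPE) {
		out[0] = PPP_ESCAPE;
		out[1] = c ^ PPP_TRANS;
		return 2;
	}
	out[0] = c;
	return 1;
}

int main(void)
{
	unsigned char in[] = { 0x7e, 0x41, 0x7d, 0x13 }, buf[2];

	for (unsigned int i = 0; i < sizeof(in); i++) {
		int n = stuff(in[i], buf);

		for (int j = 0; j < n; j++)
			printf("%02x ", buf[j]);
	}
	printf("\n");	/* prints "7d 5e 41 7d 5d 7d 33" */
	return 0;
}
```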
+
+static int
+ppp_async_encode(struct asyncppp *ap)
+{
+	int fcs, i, count, c, proto;
+	unsigned char *buf, *buflim;
+	unsigned char *data;
+	int islcp;
+
+	buf = ap->obuf;
+	ap->olim = buf;
+	ap->optr = buf;
+	i = ap->tpkt_pos;
+	data = ap->tpkt->data;
+	count = ap->tpkt->len;
+	fcs = ap->tfcs;
+	proto = get_unaligned_be16(data);
+
+	/*
+	 * LCP packets with code values between 1 (configure-request)
+	 * and 7 (code-reject) must be sent as though no options
+	 * had been negotiated.
+	 */
+	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+	if (i == 0) {
+		if (islcp)
+			async_lcp_peek(ap, data, count, 0);
+
+		/*
+		 * Start of a new packet - insert the leading FLAG
+		 * character if necessary.
+		 */
+		if (islcp || flag_time == 0 ||
+		    time_after_eq(jiffies, ap->last_xmit + flag_time))
+			*buf++ = PPP_FLAG;
+		ap->last_xmit = jiffies;
+		fcs = PPP_INITFCS;
+
+		/*
+		 * Put in the address/control bytes if necessary
+		 */
+		if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
+			PUT_BYTE(ap, buf, 0xff, islcp);
+			fcs = PPP_FCS(fcs, 0xff);
+			PUT_BYTE(ap, buf, 0x03, islcp);
+			fcs = PPP_FCS(fcs, 0x03);
+		}
+	}
+
+	/*
+	 * Once we put in the last byte, we need to put in the FCS
+	 * and closing flag, so make sure there are at least 7 bytes
+	 * of free space in the output buffer.
+	 */
+	buflim = ap->obuf + OBUFSIZE - 6;
+	while (i < count && buf < buflim) {
+		c = data[i++];
+		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
+			continue;	/* compress protocol field */
+		fcs = PPP_FCS(fcs, c);
+		PUT_BYTE(ap, buf, c, islcp);
+	}
+
+	if (i < count) {
+		/*
+		 * Remember where we are up to in this packet.
+		 */
+		ap->olim = buf;
+		ap->tpkt_pos = i;
+		ap->tfcs = fcs;
+		return 0;
+	}
+
+	/*
+	 * We have finished the packet.  Add the FCS and flag.
+	 */
+	fcs = ~fcs;
+	c = fcs & 0xff;
+	PUT_BYTE(ap, buf, c, islcp);
+	c = (fcs >> 8) & 0xff;
+	PUT_BYTE(ap, buf, c, islcp);
+	*buf++ = PPP_FLAG;
+	ap->olim = buf;
+
+	consume_skb(ap->tpkt);
+	ap->tpkt = NULL;
+	return 1;
+}
+
+/*
+ * Transmit-side routines.
+ */
+
+/*
+ * Send a packet to the peer over an async tty line.
+ * Returns 1 iff the packet was accepted.
+ * If the packet was not accepted, we will call ppp_output_wakeup
+ * at some later time.
+ */
+static int
+ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct asyncppp *ap = chan->private;
+
+	ppp_async_push(ap);
+
+	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
+		return 0;	/* already full */
+	ap->tpkt = skb;
+	ap->tpkt_pos = 0;
+
+	ppp_async_push(ap);
+	return 1;
+}
+
+/*
+ * Push as much data as possible out to the tty.
+ */
+static int
+ppp_async_push(struct asyncppp *ap)
+{
+	int avail, sent, done = 0;
+	struct tty_struct *tty = ap->tty;
+	int tty_stuffed = 0;
+
+	/*
+	 * We can get called recursively here if the tty write
+	 * function calls our wakeup function.  This can happen
+	 * for example on a pty with both the master and slave
+	 * set to PPP line discipline.
+	 * We use the XMIT_BUSY bit to detect this and get out,
+	 * leaving the XMIT_WAKEUP bit set to tell the other
+	 * instance that it may be able to write more now.
+	 */
+	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
+		return 0;
+	spin_lock_bh(&ap->xmit_lock);
+	for (;;) {
+		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
+			tty_stuffed = 0;
+		if (!tty_stuffed && ap->optr < ap->olim) {
+			avail = ap->olim - ap->optr;
+			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+			sent = tty->ops->write(tty, ap->optr, avail);
+			if (sent < 0)
+				goto flush;	/* error, e.g. loss of CD */
+			ap->optr += sent;
+			if (sent < avail)
+				tty_stuffed = 1;
+			continue;
+		}
+		if (ap->optr >= ap->olim && ap->tpkt) {
+			if (ppp_async_encode(ap)) {
+				/* finished processing ap->tpkt */
+				clear_bit(XMIT_FULL, &ap->xmit_flags);
+				done = 1;
+			}
+			continue;
+		}
+		/*
+		 * We haven't made any progress this time around.
+		 * Clear XMIT_BUSY to let other callers in, but
+		 * after doing so we have to check if anyone set
+		 * XMIT_WAKEUP since we last checked it.  If they
+		 * did, we should try again to set XMIT_BUSY and go
+		 * around again in case XMIT_BUSY was still set when
+		 * the other caller tried.
+		 */
+		clear_bit(XMIT_BUSY, &ap->xmit_flags);
+		/* any more work to do? if not, exit the loop */
+		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
+		      (!tty_stuffed && ap->tpkt)))
+			break;
+		/* more work to do, see if we can do it now */
+		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
+			break;
+	}
+	spin_unlock_bh(&ap->xmit_lock);
+	return done;
+
+flush:
+	clear_bit(XMIT_BUSY, &ap->xmit_flags);
+	if (ap->tpkt) {
+		kfree_skb(ap->tpkt);
+		ap->tpkt = NULL;
+		clear_bit(XMIT_FULL, &ap->xmit_flags);
+		done = 1;
+	}
+	ap->optr = ap->olim;
+	spin_unlock_bh(&ap->xmit_lock);
+	return done;
+}
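The XMIT_BUSY/XMIT_WAKEUP dance above is a lock-free ownership handoff: the loser of the test_and_set records a wakeup, and the owner re-checks that flag after dropping BUSY so no wakeup is lost. A simplified user-space sketch of the retry shape (bit masks instead of the driver's bit numbers, and the tty work reduced to a print):

```c
#include <stdatomic.h>
#include <stdio.h>

enum { XMIT_WAKEUP = 1, XMIT_BUSY = 2 };

static atomic_uint xmit_flags;

static void push(void)
{
	if (atomic_fetch_or(&xmit_flags, XMIT_BUSY) & XMIT_BUSY)
		return;			/* someone else owns the output */
	do {
		atomic_fetch_and(&xmit_flags, ~(unsigned int)XMIT_WAKEUP);
		puts("pushing output");	/* ... tty write would go here ... */
		atomic_fetch_and(&xmit_flags, ~(unsigned int)XMIT_BUSY);
		/* a wakeup may have arrived while we held BUSY: re-check,
		 * and continue only if we win ownership back */
	} while ((atomic_load(&xmit_flags) & XMIT_WAKEUP) &&
		 !(atomic_fetch_or(&xmit_flags, XMIT_BUSY) & XMIT_BUSY));
}

int main(void)
{
	atomic_fetch_or(&xmit_flags, XMIT_WAKEUP);
	push();
	return 0;
}
```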
+
+/*
+ * Flush output from our internal buffers.
+ * Called for the TCFLSH ioctl. Can be entered in parallel
+ * but this is covered by the xmit_lock.
+ */
+static void
+ppp_async_flush_output(struct asyncppp *ap)
+{
+	int done = 0;
+
+	spin_lock_bh(&ap->xmit_lock);
+	ap->optr = ap->olim;
+	if (ap->tpkt != NULL) {
+		kfree_skb(ap->tpkt);
+		ap->tpkt = NULL;
+		clear_bit(XMIT_FULL, &ap->xmit_flags);
+		done = 1;
+	}
+	spin_unlock_bh(&ap->xmit_lock);
+	if (done)
+		ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Receive-side routines.
+ */
+
+/* see how many ordinary chars there are at the start of buf */
+static inline int
+scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
+{
+	int i, c;
+
+	for (i = 0; i < count; ++i) {
+		c = buf[i];
+		if (c == PPP_ESCAPE || c == PPP_FLAG ||
+		    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
+			break;
+	}
+	return i;
+}
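+
+/*
+ * e.g. with the default receive ACCM of 0xffffffff every control
+ * character 0x00-0x1f terminates the scan; once LCP has negotiated a
+ * smaller async control character map, only PPP_FLAG (0x7e),
+ * PPP_ESCAPE (0x7d) and the characters left in raccm do.
+ */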
+
+/* called when a flag is seen - do end-of-packet processing */
+static void
+process_input_packet(struct asyncppp *ap)
+{
+	struct sk_buff *skb;
+	unsigned char *p;
+	unsigned int len, fcs, proto;
+
+	skb = ap->rpkt;
+	if (ap->state & (SC_TOSS | SC_ESCAPE))
+		goto err;
+
+	if (skb == NULL)
+		return;		/* 0-length packet */
+
+	/* check the FCS */
+	p = skb->data;
+	len = skb->len;
+	if (len < 3)
+		goto err;	/* too short */
+	fcs = PPP_INITFCS;
+	for (; len > 0; --len)
+		fcs = PPP_FCS(fcs, *p++);
+	if (fcs != PPP_GOODFCS)
+		goto err;	/* bad FCS */
+	skb_trim(skb, skb->len - 2);
+
+	/* check for address/control and protocol compression */
+	p = skb->data;
+	if (p[0] == PPP_ALLSTATIONS) {
+		/* chop off address/control */
+		if (p[1] != PPP_UI || skb->len < 3)
+			goto err;
+		p = skb_pull(skb, 2);
+	}
+	proto = p[0];
+	if (proto & 1) {
+		/* protocol is compressed */
+		*(u8 *)skb_push(skb, 1) = 0;
+	} else {
+		if (skb->len < 2)
+			goto err;
+		proto = (proto << 8) + p[1];
+		if (proto == PPP_LCP)
+			async_lcp_peek(ap, p, skb->len, 1);
+	}
+
+	/* queue the frame to be processed */
+	skb->cb[0] = ap->state;
+	skb_queue_tail(&ap->rqueue, skb);
+	ap->rpkt = NULL;
+	ap->state = 0;
+	return;
+
+ err:
+	/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
+	ap->state = SC_PREV_ERROR;
+	if (skb) {
+		/* make skb appear as freshly allocated */
+		skb_trim(skb, 0);
+		skb_reserve(skb, -skb_headroom(skb));
+	}
+}
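+
+/*
+ * Illustrative sketch, not part of the driver: a bitwise version of
+ * the RFC 1662 FCS-16 that the table-driven PPP_FCS() macro from
+ * <linux/ppp_defs.h> implements.  Starting from PPP_INITFCS (0xffff)
+ * and running over a frame that still carries its two trailing FCS
+ * bytes, a good frame yields PPP_GOODFCS (0xf0b8), which is exactly
+ * the check made in process_input_packet() above.
+ */
+#if 0
+static u16 fcs16_ref(u16 fcs, const u8 *buf, int len)
+{
+	int bit;
+
+	while (len-- > 0) {
+		fcs ^= *buf++;			/* xor in the next byte */
+		for (bit = 0; bit < 8; ++bit)	/* 0x8408: reflected CCITT poly */
+			fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
+	}
+	return fcs;
+}
+#endif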
+
+/* Called when the tty driver has data for us. Runs parallel with the
+   other ldisc functions but will not be re-entered */
+
+static void
+ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
+		char *flags, int count)
+{
+	struct sk_buff *skb;
+	int c, i, j, n, s, f;
+	unsigned char *sp;
+
+	/* update bits used for 8-bit cleanness detection */
+	if (~ap->rbits & SC_RCV_BITS) {
+		s = 0;
+		for (i = 0; i < count; ++i) {
+			c = buf[i];
+			if (flags && flags[i] != 0)
+				continue;
+			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
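+			/* The next two lines fold the byte's parity into
+			 * 4 bits and look it up in 0x6996
+			 * (0b0110100110010110), whose bit i is the parity
+			 * of i; the result is the parity of the whole byte. */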
+			c = ((c >> 4) ^ c) & 0xf;
+			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
+		}
+		ap->rbits |= s;
+	}
+
+	while (count > 0) {
+		/* scan through and see how many chars we can do in bulk */
+		if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
+			n = 1;
+		else
+			n = scan_ordinary(ap, buf, count);
+
+		f = 0;
+		if (flags && (ap->state & SC_TOSS) == 0) {
+			/* check the flags to see if any char had an error */
+			for (j = 0; j < n; ++j)
+				if ((f = flags[j]) != 0)
+					break;
+		}
+		if (f != 0) {
+			/* start tossing */
+			ap->state |= SC_TOSS;
+
+		} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
+			/* stuff the chars in the skb */
+			skb = ap->rpkt;
+			if (!skb) {
+				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
+				if (!skb)
+					goto nomem;
+				ap->rpkt = skb;
+			}
+			if (skb->len == 0) {
+				/* Try to get the payload 4-byte aligned.
+				 * This should match the
+				 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
+				 * process_input_packet, but we do not have
+				 * enough chars here to test buf[1] and buf[2].
+				 */
+				if (buf[0] != PPP_ALLSTATIONS)
+					skb_reserve(skb, 2 + (buf[0] & 1));
+			}
+			if (n > skb_tailroom(skb)) {
+				/* packet overflowed MRU */
+				ap->state |= SC_TOSS;
+			} else {
+				sp = skb_put_data(skb, buf, n);
+				if (ap->state & SC_ESCAPE) {
+					sp[0] ^= PPP_TRANS;
+					ap->state &= ~SC_ESCAPE;
+				}
+			}
+		}
+
+		if (n >= count)
+			break;
+
+		c = buf[n];
+		if (flags != NULL && flags[n] != 0) {
+			ap->state |= SC_TOSS;
+		} else if (c == PPP_FLAG) {
+			process_input_packet(ap);
+		} else if (c == PPP_ESCAPE) {
+			ap->state |= SC_ESCAPE;
+		} else if (I_IXON(ap->tty)) {
+			if (c == START_CHAR(ap->tty))
+				start_tty(ap->tty);
+			else if (c == STOP_CHAR(ap->tty))
+				stop_tty(ap->tty);
+		}
+		/* otherwise it's a char in the recv ACCM */
+		++n;
+
+		buf += n;
+		if (flags)
+			flags += n;
+		count -= n;
+	}
+	return;
+
+ nomem:
+	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
+	ap->state |= SC_TOSS;
+}
+
+/*
+ * We look at LCP frames going past so that we can notice
+ * and react to the LCP configure-ack from the peer.
+ * In the situation where we have already sent the peer a
+ * configure-ack, LCP is up as soon as the peer sends its
+ * configure-ack, so the packet immediately following it can be
+ * sent with the configured LCP options.  This allows us to process
+ * that packet correctly without pppd needing to respond quickly.
+ *
+ * We only respond to the received configure-ack if we have just
+ * sent a configure-request, and the configure-ack contains the
+ * same data (this is checked using a 16-bit crc of the data).
+ */
+#define CONFREQ		1	/* LCP code field values */
+#define CONFACK		2
+#define LCP_MRU		1	/* LCP option numbers */
+#define LCP_ASYNCMAP	2
+
+static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
+			   int len, int inbound)
+{
+	int dlen, fcs, i, code;
+	u32 val;
+
+	data += 2;		/* skip protocol bytes */
+	len -= 2;
+	if (len < 4)		/* 4 = code, ID, length */
+		return;
+	code = data[0];
+	if (code != CONFACK && code != CONFREQ)
+		return;
+	dlen = get_unaligned_be16(data + 2);
+	if (len < dlen)
+		return;		/* packet got truncated or length is bogus */
+
+	if (code == (inbound? CONFACK: CONFREQ)) {
+		/*
+		 * sent confreq or received confack:
+		 * calculate the crc of the data from the ID field on.
+		 */
+		fcs = PPP_INITFCS;
+		for (i = 1; i < dlen; ++i)
+			fcs = PPP_FCS(fcs, data[i]);
+
+		if (!inbound) {
+			/* outbound confreq - remember the crc for later */
+			ap->lcp_fcs = fcs;
+			return;
+		}
+
+		/* received confack, check the crc */
+		fcs ^= ap->lcp_fcs;
+		ap->lcp_fcs = -1;
+		if (fcs != 0)
+			return;
+	} else if (inbound)
+		return;	/* not interested in received confreq */
+
+	/* process the options in the confack */
+	data += 4;
+	dlen -= 4;
+	/* data[0] is code, data[1] is length */
+	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
+		switch (data[0]) {
+		case LCP_MRU:
+			val = get_unaligned_be16(data + 2);
+			if (inbound)
+				ap->mru = val;
+			else
+				ap->chan.mtu = val;
+			break;
+		case LCP_ASYNCMAP:
+			val = get_unaligned_be32(data + 2);
+			if (inbound)
+				ap->raccm = val;
+			else
+				ap->xaccm[0] = val;
+			break;
+		}
+		dlen -= data[1];
+		data += data[1];
+	}
+}
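+
+/*
+ * Illustrative sketch, not part of the driver: the RFC 1661 layout of
+ * the LCP packets compared above.  The FCS that async_lcp_peek()
+ * remembers for an outbound configure-request and checks against the
+ * inbound configure-ack runs from the ID field to the end of the
+ * options, so the two match only when the peer acked exactly what we
+ * sent.
+ *
+ *	+--------+--------+-----------------+------------------+
+ *	|  code  |   id   |  length (be16)  |  options ...     |
+ *	+--------+--------+-----------------+------------------+
+ *	 data[0]  data[1]   data[2..3]        data[4..]
+ */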
+
+static void __exit ppp_async_cleanup(void)
+{
+	if (tty_unregister_ldisc(N_PPP) != 0)
+		printk(KERN_ERR "failed to unregister PPP line discipline\n");
+}
+
+module_init(ppp_async_init);
+module_exit(ppp_async_cleanup);
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
new file mode 100644
index 0000000..b5edc7f
--- /dev/null
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -0,0 +1,631 @@
+/*
+ * ppp_deflate.c - interface the zlib procedures for Deflate compression
+ * and decompression (as used by gzip) to the PPP code.
+ *
+ * Copyright 1994-1998 Paul Mackerras.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <linux/ppp_defs.h>
+#include <linux/ppp-comp.h>
+
+#include <linux/zlib.h>
+#include <asm/unaligned.h>
+
+/*
+ * State for a Deflate (de)compressor.
+ */
+struct ppp_deflate_state {
+    int		seqno;
+    int		w_size;
+    int		unit;
+    int		mru;
+    int		debug;
+    z_stream	strm;
+    struct compstat stats;
+};
+
+#define DEFLATE_OVHD	2		/* Deflate overhead/packet */
+
+static void	*z_comp_alloc(unsigned char *options, int opt_len);
+static void	*z_decomp_alloc(unsigned char *options, int opt_len);
+static void	z_comp_free(void *state);
+static void	z_decomp_free(void *state);
+static int	z_comp_init(void *state, unsigned char *options,
+				 int opt_len,
+				 int unit, int hdrlen, int debug);
+static int	z_decomp_init(void *state, unsigned char *options,
+				   int opt_len,
+				   int unit, int hdrlen, int mru, int debug);
+static int	z_compress(void *state, unsigned char *rptr,
+				unsigned char *obuf,
+				int isize, int osize);
+static void	z_incomp(void *state, unsigned char *ibuf, int icnt);
+static int	z_decompress(void *state, unsigned char *ibuf,
+				int isize, unsigned char *obuf, int osize);
+static void	z_comp_reset(void *state);
+static void	z_decomp_reset(void *state);
+static void	z_comp_stats(void *state, struct compstat *stats);
+
+/**
+ *	z_comp_free - free the memory used by a compressor
+ *	@arg:	pointer to the private state for the compressor.
+ */
+static void z_comp_free(void *arg)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	if (state) {
+		zlib_deflateEnd(&state->strm);
+		vfree(state->strm.workspace);
+		kfree(state);
+	}
+}
+
+/**
+ *	z_comp_alloc - allocate space for a compressor.
+ *	@options: pointer to CCP option data
+ *	@opt_len: length of the CCP option at @options.
+ *
+ *	The @options pointer points to a buffer containing the
+ *	CCP option data for the compression being negotiated.  It is
+ *	formatted according to RFC1979, and describes the window
+ *	size that the peer is requesting that we use in compressing
+ *	data to be sent to it.
+ *
+ *	Returns the pointer to the private state for the compressor,
+ *	or NULL if we could not allocate enough memory.
+ */
+static void *z_comp_alloc(unsigned char *options, int opt_len)
+{
+	struct ppp_deflate_state *state;
+	int w_size;
+
+	if (opt_len != CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
+		return NULL;
+	w_size = DEFLATE_SIZE(options[2]);
+	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
+		return NULL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		return NULL;
+
+	state->strm.next_in   = NULL;
+	state->w_size         = w_size;
+	state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
+	if (state->strm.workspace == NULL)
+		goto out_free;
+
+	if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
+			 DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
+	    != Z_OK)
+		goto out_free;
+	return (void *) state;
+
+out_free:
+	z_comp_free(state);
+	return NULL;
+}
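+
+/*
+ * Illustrative sketch, not part of the driver: the four option bytes
+ * validated above, built the way a CCP implementation such as pppd
+ * would emit them (RFC 1979).  For the maximum 32 KB window
+ * (w_size == 15) this produces 1a 04 78 00.
+ */
+#if 0
+static void build_deflate_option(unsigned char opt[CILEN_DEFLATE], int w_size)
+{
+	opt[0] = CI_DEFLATE;			/* option type 26 */
+	opt[1] = CILEN_DEFLATE;			/* total length 4 */
+	opt[2] = DEFLATE_MAKE_OPT(w_size);	/* ((w_size - 8) << 4) | 8 */
+	opt[3] = DEFLATE_CHK_SEQUENCE;		/* check method 0 */
+}
+#endif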
+
+/**
+ *	z_comp_init - initialize a previously-allocated compressor.
+ *	@arg:	pointer to the private state for the compressor
+ *	@options: pointer to the CCP option data describing the
+ *		compression that was negotiated with the peer
+ *	@opt_len: length of the CCP option data at @options
+ *	@unit:	PPP unit number for diagnostic messages
+ *	@hdrlen: ignored (present for backwards compatibility)
+ *	@debug:	debug flag; if non-zero, debug messages are printed.
+ *
+ *	The CCP options described by @options must match the options
+ *	specified when the compressor was allocated.  The compressor
+ *	history is reset.  Returns 0 for failure (CCP options don't
+ *	match) or 1 for success.
+ */
+static int z_comp_init(void *arg, unsigned char *options, int opt_len,
+		       int unit, int hdrlen, int debug)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	if (opt_len < CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    DEFLATE_SIZE(options[2]) != state->w_size ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
+		return 0;
+
+	state->seqno = 0;
+	state->unit  = unit;
+	state->debug = debug;
+
+	zlib_deflateReset(&state->strm);
+
+	return 1;
+}
+
+/**
+ *	z_comp_reset - reset a previously-allocated compressor.
+ *	@arg:	pointer to private state for the compressor.
+ *
+ *	This clears the history for the compressor and makes it
+ *	ready to start emitting a new compressed stream.
+ */
+static void z_comp_reset(void *arg)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	state->seqno = 0;
+	zlib_deflateReset(&state->strm);
+}
+
+/**
+ *	z_compress - compress a PPP packet with Deflate compression.
+ *	@arg:	pointer to private state for the compressor
+ *	@rptr:	uncompressed packet (input)
+ *	@obuf:	compressed packet (output)
+ *	@isize:	size of uncompressed packet
+ *	@osize:	space available at @obuf
+ *
+ *	Returns the length of the compressed packet, or 0 if the
+ *	packet is incompressible.
+ */
+static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
+	       int isize, int osize)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+	int r, proto, off, olen, oavail;
+	unsigned char *wptr;
+
+	/*
+	 * Check that the protocol is in the range we handle.
+	 */
+	proto = PPP_PROTOCOL(rptr);
+	if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
+		return 0;
+
+	/* Don't generate compressed packets which are larger than
+	   the uncompressed packet. */
+	if (osize > isize)
+		osize = isize;
+
+	wptr = obuf;
+
+	/*
+	 * Copy over the PPP header and store the 2-byte sequence number.
+	 */
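+	/* output layout: typically ff 03 00 fd <seq:be16> <deflate data>,
+	 * i.e. address, control, the PPP_COMP protocol number and the
+	 * DEFLATE_OVHD-byte sequence number */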
+	wptr[0] = PPP_ADDRESS(rptr);
+	wptr[1] = PPP_CONTROL(rptr);
+	put_unaligned_be16(PPP_COMP, wptr + 2);
+	wptr += PPP_HDRLEN;
+	put_unaligned_be16(state->seqno, wptr);
+	wptr += DEFLATE_OVHD;
+	olen = PPP_HDRLEN + DEFLATE_OVHD;
+	state->strm.next_out = wptr;
+	state->strm.avail_out = oavail = osize - olen;
+	++state->seqno;
+
+	off = (proto > 0xff) ? 2 : 3;	/* skip 1st proto byte if 0 */
+	rptr += off;
+	state->strm.next_in = rptr;
+	state->strm.avail_in = (isize - off);
+
+	for (;;) {
+		r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
+		if (r != Z_OK) {
+			if (state->debug)
+				printk(KERN_ERR
+				       "z_compress: deflate returned %d\n", r);
+			break;
+		}
+		if (state->strm.avail_out == 0) {
+			olen += oavail;
+			state->strm.next_out = NULL;
+			state->strm.avail_out = oavail = 1000000;
+		} else {
+			break;		/* all done */
+		}
+	}
+	olen += oavail - state->strm.avail_out;
+
+	/*
+	 * See if we managed to reduce the size of the packet.
+	 */
+	if (olen < isize && olen <= osize) {
+		state->stats.comp_bytes += olen;
+		state->stats.comp_packets++;
+	} else {
+		state->stats.inc_bytes += isize;
+		state->stats.inc_packets++;
+		olen = 0;
+	}
+	state->stats.unc_bytes += isize;
+	state->stats.unc_packets++;
+
+	return olen;
+}
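+
+/*
+ * Illustrative userspace sketch, not part of the driver, assuming
+ * plain <zlib.h>: a negative windowBits requests the same raw
+ * (headerless) deflate stream that zlib_deflateInit2(..., -w_size, ...)
+ * sets up above.  Userland zlib has no Z_PACKET_FLUSH; Z_SYNC_FLUSH is
+ * the nearest equivalent for flushing at a packet boundary.
+ */
+#if 0
+#include <string.h>
+#include <zlib.h>
+
+static int raw_deflate_init(z_stream *strm, int w_size)
+{
+	memset(strm, 0, sizeof(*strm));
+	return deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+			    -w_size, 8, Z_DEFAULT_STRATEGY);
+}
+#endif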
+
+/**
+ *	z_comp_stats - return compression statistics for a compressor
+ *		or decompressor.
+ *	@arg:	pointer to private space for the (de)compressor
+ *	@stats:	pointer to a struct compstat to receive the result.
+ */
+static void z_comp_stats(void *arg, struct compstat *stats)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	*stats = state->stats;
+}
+
+/**
+ *	z_decomp_free - Free the memory used by a decompressor.
+ *	@arg:	pointer to private space for the decompressor.
+ */
+static void z_decomp_free(void *arg)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	if (state) {
+		zlib_inflateEnd(&state->strm);
+		vfree(state->strm.workspace);
+		kfree(state);
+	}
+}
+
+/**
+ *	z_decomp_alloc - allocate space for a decompressor.
+ *	@options: pointer to CCP option data
+ *	@opt_len: length of the CCP option at @options.
+ *
+ *	The @options pointer points to a buffer containing the
+ *	CCP option data for the compression being negotiated.  It is
+ *	formatted according to RFC1979, and describes the window
+ *	size that we are requesting the peer to use in compressing
+ *	data to be sent to us.
+ *
+ *	Returns the pointer to the private state for the decompressor,
+ *	or NULL if we could not allocate enough memory.
+ */
+static void *z_decomp_alloc(unsigned char *options, int opt_len)
+{
+	struct ppp_deflate_state *state;
+	int w_size;
+
+	if (opt_len != CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
+		return NULL;
+	w_size = DEFLATE_SIZE(options[2]);
+	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
+		return NULL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		return NULL;
+
+	state->w_size         = w_size;
+	state->strm.next_out  = NULL;
+	state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
+	if (state->strm.workspace == NULL)
+		goto out_free;
+
+	if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
+		goto out_free;
+	return (void *) state;
+
+out_free:
+	z_decomp_free(state);
+	return NULL;
+}
+
+/**
+ *	z_decomp_init - initialize a previously-allocated decompressor.
+ *	@arg:	pointer to the private state for the decompressor
+ *	@options: pointer to the CCP option data describing the
+ *		compression that was negotiated with the peer
+ *	@opt_len: length of the CCP option data at @options
+ *	@unit:	PPP unit number for diagnostic messages
+ *	@hdrlen: ignored (present for backwards compatibility)
+ *	@mru:	maximum length of decompressed packets
+ *	@debug:	debug flag; if non-zero, debug messages are printed.
+ *
+ *	The CCP options described by @options must match the options
+ *	specified when the decompressor was allocated.  The decompressor
+ *	history is reset.  Returns 0 for failure (CCP options don't
+ *	match) or 1 for success.
+ */
+static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
+			 int unit, int hdrlen, int mru, int debug)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	if (opt_len < CILEN_DEFLATE ||
+	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
+	    options[1] != CILEN_DEFLATE ||
+	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
+	    DEFLATE_SIZE(options[2]) != state->w_size ||
+	    options[3] != DEFLATE_CHK_SEQUENCE)
+		return 0;
+
+	state->seqno = 0;
+	state->unit  = unit;
+	state->debug = debug;
+	state->mru   = mru;
+
+	zlib_inflateReset(&state->strm);
+
+	return 1;
+}
+
+/**
+ *	z_decomp_reset - reset a previously-allocated decompressor.
+ *	@arg:	pointer to private state for the decompressor.
+ *
+ *	This clears the history for the decompressor and makes it
+ *	ready to receive a new compressed stream.
+ */
+static void z_decomp_reset(void *arg)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+	state->seqno = 0;
+	zlib_inflateReset(&state->strm);
+}
+
+/**
+ *	z_decompress - decompress a Deflate-compressed packet.
+ *	@arg:	pointer to private state for the decompressor
+ *	@ibuf:	pointer to input (compressed) packet data
+ *	@isize:	length of input packet
+ *	@obuf:	pointer to space for output (decompressed) packet
+ *	@osize:	amount of space available at @obuf
+ *
+ * Because of patent problems, we return DECOMP_ERROR for errors
+ * found by inspecting the input data and for system problems, but
+ * DECOMP_FATALERROR for any errors which could possibly be said
+ * to be detected "after" decompression.  For DECOMP_ERROR,
+ * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
+ * infringing a patent of Motorola's if we do, so we take CCP down
+ * instead.
+ *
+ * Given that the frame has the correct sequence number and a good FCS,
+ * errors such as invalid codes in the input most likely indicate a
+ * bug, so we return DECOMP_FATALERROR for them in order to turn off
+ * compression, even though they are detected by inspecting the input.
+ */
+static int z_decompress(void *arg, unsigned char *ibuf, int isize,
+		 unsigned char *obuf, int osize)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+	int olen, seq, r;
+	int decode_proto, overflow;
+	unsigned char overflow_buf[1];
+
+	if (isize <= PPP_HDRLEN + DEFLATE_OVHD) {
+		if (state->debug)
+			printk(KERN_DEBUG "z_decompress%d: short pkt (%d)\n",
+			       state->unit, isize);
+		return DECOMP_ERROR;
+	}
+
+	/* Check the sequence number. */
+	seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
+	if (seq != (state->seqno & 0xffff)) {
+		if (state->debug)
+			printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
+			       state->unit, seq, state->seqno & 0xffff);
+		return DECOMP_ERROR;
+	}
+	++state->seqno;
+
+	/*
+	 * Fill in the first part of the PPP header.  The protocol field
+	 * comes from the decompressed data.
+	 */
+	obuf[0] = PPP_ADDRESS(ibuf);
+	obuf[1] = PPP_CONTROL(ibuf);
+	obuf[2] = 0;
+
+	/*
+	 * Set up to call inflate.  We set avail_out to 1 initially so we can
+	 * look at the first byte of the output and decide whether we have
+	 * a 1-byte or 2-byte protocol field.
+	 */
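+	/* e.g. IP (protocol 0x0021) arrives protocol-compressed as the
+	 * single odd byte 0x21, while an even first byte must be the high
+	 * half of a two-byte protocol number */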
+	state->strm.next_in = ibuf + PPP_HDRLEN + DEFLATE_OVHD;
+	state->strm.avail_in = isize - (PPP_HDRLEN + DEFLATE_OVHD);
+	state->strm.next_out = obuf + 3;
+	state->strm.avail_out = 1;
+	decode_proto = 1;
+	overflow = 0;
+
+	/*
+	 * Call inflate, supplying more input or output as needed.
+	 */
+	for (;;) {
+		r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
+		if (r != Z_OK) {
+			if (state->debug)
+				printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
+				       state->unit, r, (state->strm.msg? state->strm.msg: ""));
+			return DECOMP_FATALERROR;
+		}
+		if (state->strm.avail_out != 0)
+			break;		/* all done */
+		if (decode_proto) {
+			state->strm.avail_out = osize - PPP_HDRLEN;
+			if ((obuf[3] & 1) == 0) {
+				/* 2-byte protocol field */
+				obuf[2] = obuf[3];
+				--state->strm.next_out;
+				++state->strm.avail_out;
+			}
+			decode_proto = 0;
+		} else if (!overflow) {
+			/*
+			 * We've filled up the output buffer; the only way to
+			 * find out whether inflate has any more characters
+			 * left is to give it another byte of output space.
+			 */
+			state->strm.next_out = overflow_buf;
+			state->strm.avail_out = 1;
+			overflow = 1;
+		} else {
+			if (state->debug)
+				printk(KERN_DEBUG "z_decompress%d: ran out of mru\n",
+				       state->unit);
+			return DECOMP_FATALERROR;
+		}
+	}
+
+	if (decode_proto) {
+		if (state->debug)
+			printk(KERN_DEBUG "z_decompress%d: didn't get proto\n",
+			       state->unit);
+		return DECOMP_ERROR;
+	}
+
+	olen = osize + overflow - state->strm.avail_out;
+	state->stats.unc_bytes += olen;
+	state->stats.unc_packets++;
+	state->stats.comp_bytes += isize;
+	state->stats.comp_packets++;
+
+	return olen;
+}
+
+/**
+ *	z_incomp - add incompressible input data to the history.
+ *	@arg:	pointer to private state for the decompressor
+ *	@ibuf:	pointer to input packet data
+ *	@icnt:	length of input data.
+ */
+static void z_incomp(void *arg, unsigned char *ibuf, int icnt)
+{
+	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+	int proto, r;
+
+	/*
+	 * Check that the protocol is one we handle.
+	 */
+	proto = PPP_PROTOCOL(ibuf);
+	if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
+		return;
+
+	++state->seqno;
+
+	/*
+	 * We start at either the 1st or 2nd byte of the protocol field,
+	 * depending on whether the protocol value is compressible.
+	 */
+	state->strm.next_in = ibuf + 3;
+	state->strm.avail_in = icnt - 3;
+	if (proto > 0xff) {
+		--state->strm.next_in;
+		++state->strm.avail_in;
+	}
+
+	r = zlib_inflateIncomp(&state->strm);
+	if (r != Z_OK) {
+		/* gak! */
+		if (state->debug) {
+			printk(KERN_DEBUG "z_incomp%d: inflateIncomp returned %d (%s)\n",
+			       state->unit, r, (state->strm.msg? state->strm.msg: ""));
+		}
+		return;
+	}
+
+	/*
+	 * Update stats.
+	 */
+	state->stats.inc_bytes += icnt;
+	state->stats.inc_packets++;
+	state->stats.unc_bytes += icnt;
+	state->stats.unc_packets++;
+}
+
+/*************************************************************
+ * Module interface table
+ *************************************************************/
+
+/* These are in ppp_generic.c */
+extern int  ppp_register_compressor   (struct compressor *cp);
+extern void ppp_unregister_compressor (struct compressor *cp);
+
+/*
+ * Procedures exported to the generic PPP layer (ppp_generic.c).
+ */
+static struct compressor ppp_deflate = {
+	.compress_proto =	CI_DEFLATE,
+	.comp_alloc =		z_comp_alloc,
+	.comp_free =		z_comp_free,
+	.comp_init =		z_comp_init,
+	.comp_reset =		z_comp_reset,
+	.compress =		z_compress,
+	.comp_stat =		z_comp_stats,
+	.decomp_alloc =		z_decomp_alloc,
+	.decomp_free =		z_decomp_free,
+	.decomp_init =		z_decomp_init,
+	.decomp_reset =		z_decomp_reset,
+	.decompress =		z_decompress,
+	.incomp =		z_incomp,
+	.decomp_stat =		z_comp_stats,
+	.owner =		THIS_MODULE
+};
+
+static struct compressor ppp_deflate_draft = {
+	.compress_proto =	CI_DEFLATE_DRAFT,
+	.comp_alloc =		z_comp_alloc,
+	.comp_free =		z_comp_free,
+	.comp_init =		z_comp_init,
+	.comp_reset =		z_comp_reset,
+	.compress =		z_compress,
+	.comp_stat =		z_comp_stats,
+	.decomp_alloc =		z_decomp_alloc,
+	.decomp_free =		z_decomp_free,
+	.decomp_init =		z_decomp_init,
+	.decomp_reset =		z_decomp_reset,
+	.decompress =		z_decompress,
+	.incomp =		z_incomp,
+	.decomp_stat =		z_comp_stats,
+	.owner =		THIS_MODULE
+};
+
+static int __init deflate_init(void)
+{
+	int answer = ppp_register_compressor(&ppp_deflate);
+	if (answer == 0)
+		printk(KERN_INFO
+		       "PPP Deflate Compression module registered\n");
+	ppp_register_compressor(&ppp_deflate_draft);
+	return answer;
+}
+
+static void __exit deflate_cleanup(void)
+{
+	ppp_unregister_compressor(&ppp_deflate);
+	ppp_unregister_compressor(&ppp_deflate_draft);
+}
+
+module_init(deflate_init);
+module_exit(deflate_cleanup);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
+MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
new file mode 100644
index 0000000..02ad03a
--- /dev/null
+++ b/drivers/net/ppp/ppp_generic.c
@@ -0,0 +1,3289 @@
+/*
+ * Generic PPP layer for Linux.
+ *
+ * Copyright 1999-2002 Paul Mackerras.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * The generic PPP layer handles the PPP network interfaces, the
+ * /dev/ppp device, packet and VJ compression, and multilink.
+ * It talks to PPP `channels' via the interface defined in
+ * include/linux/ppp_channel.h.  Channels provide the basic means for
+ * sending and receiving PPP frames on some kind of communications
+ * channel.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ *
+ * ==FILEVERSION 20041108==
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/ppp_defs.h>
+#include <linux/filter.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp-comp.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/stddef.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <asm/unaligned.h>
+#include <net/slhc_vj.h>
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define PPP_VERSION	"2.4.2"
+
+/*
+ * Network protocols we support.
+ */
+#define NP_IP	0		/* Internet Protocol V4 */
+#define NP_IPV6	1		/* Internet Protocol V6 */
+#define NP_IPX	2		/* IPX protocol */
+#define NP_AT	3		/* Appletalk protocol */
+#define NP_MPLS_UC 4		/* MPLS unicast */
+#define NP_MPLS_MC 5		/* MPLS multicast */
+#define NUM_NP	6		/* Number of NPs. */
+
+#define MPHDRLEN	6	/* multilink protocol header length */
+#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
+
+/*
+ * An instance of /dev/ppp can be associated with either a ppp
+ * interface unit or a ppp channel.  In both cases, file->private_data
+ * points to one of these.
+ */
+struct ppp_file {
+	enum {
+		INTERFACE=1, CHANNEL
+	}		kind;
+	struct sk_buff_head xq;		/* pppd transmit queue */
+	struct sk_buff_head rq;		/* receive queue for pppd */
+	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
+	refcount_t	refcnt;		/* # refs (incl /dev/ppp attached) */
+	int		hdrlen;		/* space to leave for headers */
+	int		index;		/* interface unit / channel number */
+	int		dead;		/* unit/channel has been shut down */
+};
+
+#define PF_TO_X(pf, X)		container_of(pf, X, file)
+
+#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
+#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
+
+/*
+ * Data structure to hold primary network stats for which
+ * we want to use 64 bit storage.  Other network stats
+ * are stored in dev->stats of the ppp structure.
+ */
+struct ppp_link_stats {
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+};
+
+/*
+ * Data structure describing one ppp unit.
+ * A ppp unit corresponds to a ppp network interface device
+ * and represents a multilink bundle.
+ * It can have 0 or more ppp channels connected to it.
+ */
+struct ppp {
+	struct ppp_file	file;		/* stuff for read/write/poll 0 */
+	struct file	*owner;		/* file that owns this unit 48 */
+	struct list_head channels;	/* list of attached channels 4c */
+	int		n_channels;	/* how many channels are attached 54 */
+	spinlock_t	rlock;		/* lock for receive side 58 */
+	spinlock_t	wlock;		/* lock for transmit side 5c */
+	int __percpu	*xmit_recursion; /* xmit recursion detect */
+	int		mru;		/* max receive unit 60 */
+	unsigned int	flags;		/* control bits 64 */
+	unsigned int	xstate;		/* transmit state bits 68 */
+	unsigned int	rstate;		/* receive state bits 6c */
+	int		debug;		/* debug flags 70 */
+	struct slcompress *vj;		/* state for VJ header compression */
+	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
+	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
+	struct compressor *xcomp;	/* transmit packet compressor 8c */
+	void		*xc_state;	/* its internal state 90 */
+	struct compressor *rcomp;	/* receive decompressor 94 */
+	void		*rc_state;	/* its internal state 98 */
+	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
+	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
+	struct net_device *dev;		/* network interface device a4 */
+	int		closing;	/* is device closing down? a8 */
+#ifdef CONFIG_PPP_MULTILINK
+	int		nxchan;		/* next channel to send something on */
+	u32		nxseq;		/* next sequence number to send */
+	int		mrru;		/* MP: max reconst. receive unit */
+	u32		nextseq;	/* MP: seq no of next packet */
+	u32		minseq;		/* MP: min of most recent seqnos */
+	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+	struct bpf_prog *pass_filter;	/* filter for packets to pass */
+	struct bpf_prog *active_filter; /* filter for pkts to reset idle */
+#endif /* CONFIG_PPP_FILTER */
+	struct net	*ppp_net;	/* the net we belong to */
+	struct ppp_link_stats stats64;	/* 64 bit network stats */
+};
+
+/*
+ * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
+ * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
+ * SC_MUST_COMP
+ * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
+ * Bits in xstate: SC_COMP_RUN
+ */
+#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
+			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
+			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
+
+/*
+ * Private data structure for each channel.
+ * This includes the data structure used for multilink.
+ */
+struct channel {
+	struct ppp_file	file;		/* stuff for read/write/poll */
+	struct list_head list;		/* link in all/new_channels list */
+	struct ppp_channel *chan;	/* public channel data structure */
+	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
+	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
+	struct ppp	*ppp;		/* ppp unit we're connected to */
+	struct net	*chan_net;	/* the net channel belongs to */
+	struct list_head clist;		/* link in list of channels per unit */
+	rwlock_t	upl;		/* protects `ppp' */
+#ifdef CONFIG_PPP_MULTILINK
+	u8		avail;		/* flag used in multilink stuff */
+	u8		had_frag;	/* >= 1 fragments have been sent */
+	u32		lastseq;	/* MP: last sequence # received */
+	int		speed;		/* speed of the corresponding ppp channel*/
+#endif /* CONFIG_PPP_MULTILINK */
+};
+
+struct ppp_config {
+	struct file *file;
+	s32 unit;
+	bool ifname_is_set;
+};
+
+/*
+ * SMP locking issues:
+ * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
+ * list and the ppp.n_channels field; you need to take both locks
+ * before you modify them.
+ * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
+ * channel.downl.
+ */
+
+static DEFINE_MUTEX(ppp_mutex);
+static atomic_t ppp_unit_count = ATOMIC_INIT(0);
+static atomic_t channel_count = ATOMIC_INIT(0);
+
+/* per-net private data for this module */
+static unsigned int ppp_net_id __read_mostly;
+struct ppp_net {
+	/* units to ppp mapping */
+	struct idr units_idr;
+
+	/*
+	 * all_ppp_mutex protects the units_idr mapping.
+	 * It also ensures that finding a ppp unit in the units_idr
+	 * map and updating its file.refcnt field is atomic.
+	 */
+	struct mutex all_ppp_mutex;
+
+	/* channels */
+	struct list_head all_channels;
+	struct list_head new_channels;
+	int last_channel_index;
+
+	/*
+	 * all_channels_lock protects all_channels and
+	 * last_channel_index, and the atomicity of finding
+	 * a channel and updating its file.refcnt field.
+	 */
+	spinlock_t all_channels_lock;
+};
+
+/* Get the PPP protocol number from a skb */
+#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
+
+/* We limit the length of ppp->file.rq to this (arbitrary) value */
+#define PPP_MAX_RQLEN	32
+
+/*
+ * Maximum number of multilink fragments queued up.
+ * This has to be large enough to cope with the maximum latency of
+ * the slowest channel relative to the others.  Strictly it should
+ * depend on the number of channels and their characteristics.
+ */
+#define PPP_MP_MAX_QLEN	128
+
+/* Multilink header bits. */
+#define B	0x80		/* this fragment begins a packet */
+#define E	0x40		/* this fragment ends a packet */
+
+/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
+#define seq_before(a, b)	((s32)((a) - (b)) < 0)
+#define seq_after(a, b)		((s32)((a) - (b)) > 0)
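+
+/* e.g. seq_before(0xfffffff0, 0x10) is true: the u32 difference is
+   0xffffffe0, which is negative as an s32, so the comparisons behave
+   sanely across 32-bit wrap-around */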
+
+/* Prototypes. */
+static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+			struct file *file, unsigned int cmd, unsigned long arg);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
+static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
+static void ppp_push(struct ppp *ppp);
+static void ppp_channel_push(struct channel *pch);
+static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
+			      struct channel *pch);
+static void ppp_receive_error(struct ppp *ppp);
+static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
+static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
+					    struct sk_buff *skb);
+#ifdef CONFIG_PPP_MULTILINK
+static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
+				struct channel *pch);
+static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
+static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
+static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
+#endif /* CONFIG_PPP_MULTILINK */
+static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
+static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
+static void ppp_ccp_closed(struct ppp *ppp);
+static struct compressor *find_compressor(int type);
+static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
+static int ppp_create_interface(struct net *net, struct file *file, int *unit);
+static void init_ppp_file(struct ppp_file *pf, int kind);
+static void ppp_destroy_interface(struct ppp *ppp);
+static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
+static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
+static int ppp_connect_channel(struct channel *pch, int unit);
+static int ppp_disconnect_channel(struct channel *pch);
+static void ppp_destroy_channel(struct channel *pch);
+static int unit_get(struct idr *p, void *ptr);
+static int unit_set(struct idr *p, void *ptr, int n);
+static void unit_put(struct idr *p, int n);
+static void *unit_find(struct idr *p, int n);
+static void ppp_setup(struct net_device *dev);
+
+static const struct net_device_ops ppp_netdev_ops;
+
+static struct class *ppp_class;
+
+/* per net-namespace data */
+static inline struct ppp_net *ppp_pernet(struct net *net)
+{
+	BUG_ON(!net);
+
+	return net_generic(net, ppp_net_id);
+}
+
+/* Translates a PPP protocol number to a NP index (NP == network protocol) */
+static inline int proto_to_npindex(int proto)
+{
+	switch (proto) {
+	case PPP_IP:
+		return NP_IP;
+	case PPP_IPV6:
+		return NP_IPV6;
+	case PPP_IPX:
+		return NP_IPX;
+	case PPP_AT:
+		return NP_AT;
+	case PPP_MPLS_UC:
+		return NP_MPLS_UC;
+	case PPP_MPLS_MC:
+		return NP_MPLS_MC;
+	}
+	return -EINVAL;
+}
+
+/* Translates an NP index into a PPP protocol number */
+static const int npindex_to_proto[NUM_NP] = {
+	PPP_IP,
+	PPP_IPV6,
+	PPP_IPX,
+	PPP_AT,
+	PPP_MPLS_UC,
+	PPP_MPLS_MC,
+};
+
+/* Translates an ethertype into an NP index */
+static inline int ethertype_to_npindex(int ethertype)
+{
+	switch (ethertype) {
+	case ETH_P_IP:
+		return NP_IP;
+	case ETH_P_IPV6:
+		return NP_IPV6;
+	case ETH_P_IPX:
+		return NP_IPX;
+	case ETH_P_PPPTALK:
+	case ETH_P_ATALK:
+		return NP_AT;
+	case ETH_P_MPLS_UC:
+		return NP_MPLS_UC;
+	case ETH_P_MPLS_MC:
+		return NP_MPLS_MC;
+	}
+	return -1;
+}
+
+/* Translates an NP index into an ethertype */
+static const int npindex_to_ethertype[NUM_NP] = {
+	ETH_P_IP,
+	ETH_P_IPV6,
+	ETH_P_IPX,
+	ETH_P_PPPTALK,
+	ETH_P_MPLS_UC,
+	ETH_P_MPLS_MC,
+};
+
+/*
+ * Locking shorthand.
+ */
+#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
+#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
+#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
+#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
+#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
+				     ppp_recv_lock(ppp); } while (0)
+#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
+				     ppp_xmit_unlock(ppp); } while (0)
+
+/*
+ * /dev/ppp device routines.
+ * The /dev/ppp device is used by pppd to control the ppp unit.
+ * It supports the read, write, ioctl and poll functions.
+ * Open instances of /dev/ppp can be in one of three states:
+ * unattached, attached to a ppp unit, or attached to a ppp channel.
+ */
+static int ppp_open(struct inode *inode, struct file *file)
+{
+	/*
+	 * This could (should?) be enforced by the permissions on /dev/ppp.
+	 */
+	if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
+		return -EPERM;
+	return 0;
+}
+
+static int ppp_release(struct inode *unused, struct file *file)
+{
+	struct ppp_file *pf = file->private_data;
+	struct ppp *ppp;
+
+	if (pf) {
+		file->private_data = NULL;
+		if (pf->kind == INTERFACE) {
+			ppp = PF_TO_PPP(pf);
+			rtnl_lock();
+			if (file == ppp->owner)
+				unregister_netdevice(ppp->dev);
+			rtnl_unlock();
+		}
+		if (refcount_dec_and_test(&pf->refcnt)) {
+			switch (pf->kind) {
+			case INTERFACE:
+				ppp_destroy_interface(PF_TO_PPP(pf));
+				break;
+			case CHANNEL:
+				ppp_destroy_channel(PF_TO_CHANNEL(pf));
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+static ssize_t ppp_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct ppp_file *pf = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	ssize_t ret;
+	struct sk_buff *skb = NULL;
+	struct iovec iov;
+	struct iov_iter to;
+
+	ret = count;
+
+	if (!pf)
+		return -ENXIO;
+	add_wait_queue(&pf->rwait, &wait);
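+	/* Open-coded wait_event_interruptible(): the wake-up condition
+	 * (packet queued, file dead, or an INTERFACE with no attached
+	 * channels that isn't looping traffic) needs ppp_recv_lock held
+	 * to evaluate, so the generic helper can't be used directly. */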
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		skb = skb_dequeue(&pf->rq);
+		if (skb)
+			break;
+		ret = 0;
+		if (pf->dead)
+			break;
+		if (pf->kind == INTERFACE) {
+			/*
+			 * Return 0 (EOF) on an interface that has no
+			 * channels connected, unless it is looping
+			 * network traffic (demand mode).
+			 */
+			struct ppp *ppp = PF_TO_PPP(pf);
+
+			ppp_recv_lock(ppp);
+			if (ppp->n_channels == 0 &&
+			    (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
+				ppp_recv_unlock(ppp);
+				break;
+			}
+			ppp_recv_unlock(ppp);
+		}
+		ret = -EAGAIN;
+		if (file->f_flags & O_NONBLOCK)
+			break;
+		ret = -ERESTARTSYS;
+		if (signal_pending(current))
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&pf->rwait, &wait);
+
+	if (!skb)
+		goto out;
+
+	ret = -EOVERFLOW;
+	if (skb->len > count)
+		goto outf;
+	ret = -EFAULT;
+	iov.iov_base = buf;
+	iov.iov_len = count;
+	iov_iter_init(&to, READ, &iov, 1, count);
+	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
+		goto outf;
+	ret = skb->len;
+
+ outf:
+	kfree_skb(skb);
+ out:
+	return ret;
+}
+
+static ssize_t ppp_write(struct file *file, const char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	struct ppp_file *pf = file->private_data;
+	struct sk_buff *skb;
+	ssize_t ret;
+
+	if (!pf)
+		return -ENXIO;
+	ret = -ENOMEM;
+	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
+	if (!skb)
+		goto out;
+	skb_reserve(skb, pf->hdrlen);
+	ret = -EFAULT;
+	if (copy_from_user(skb_put(skb, count), buf, count)) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	switch (pf->kind) {
+	case INTERFACE:
+		ppp_xmit_process(PF_TO_PPP(pf), skb);
+		break;
+	case CHANNEL:
+		skb_queue_tail(&pf->xq, skb);
+		ppp_channel_push(PF_TO_CHANNEL(pf));
+		break;
+	}
+
+	ret = count;
+
+ out:
+	return ret;
+}
+
+/* No kernel lock - fine */
+static __poll_t ppp_poll(struct file *file, poll_table *wait)
+{
+	struct ppp_file *pf = file->private_data;
+	__poll_t mask;
+
+	if (!pf)
+		return 0;
+	poll_wait(file, &pf->rwait, wait);
+	mask = EPOLLOUT | EPOLLWRNORM;
+	if (skb_peek(&pf->rq))
+		mask |= EPOLLIN | EPOLLRDNORM;
+	if (pf->dead)
+		mask |= EPOLLHUP;
+	else if (pf->kind == INTERFACE) {
+		/* see comment in ppp_read */
+		struct ppp *ppp = PF_TO_PPP(pf);
+
+		ppp_recv_lock(ppp);
+		if (ppp->n_channels == 0 &&
+		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+			mask |= EPOLLIN | EPOLLRDNORM;
+		ppp_recv_unlock(ppp);
+	}
+
+	return mask;
+}
+
+#ifdef CONFIG_PPP_FILTER
+static int get_filter(void __user *arg, struct sock_filter **p)
+{
+	struct sock_fprog uprog;
+	struct sock_filter *code = NULL;
+	int len;
+
+	if (copy_from_user(&uprog, arg, sizeof(uprog)))
+		return -EFAULT;
+
+	if (!uprog.len) {
+		*p = NULL;
+		return 0;
+	}
+
+	len = uprog.len * sizeof(struct sock_filter);
+	code = memdup_user(uprog.filter, len);
+	if (IS_ERR(code))
+		return PTR_ERR(code);
+
+	*p = code;
+	return uprog.len;
+}
+#endif /* CONFIG_PPP_FILTER */
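+
+/*
+ * Illustrative userspace sketch, not part of the driver: how a
+ * pppd-like program hands a classic BPF filter to PPPIOCSPASS.  This
+ * trivial program accepts every packet; real pass/active filters are
+ * typically compiled from tcpdump syntax (e.g. with libpcap's
+ * pcap_compile()).
+ */
+#if 0
+#include <sys/ioctl.h>
+#include <linux/filter.h>
+#include <linux/ppp-ioctl.h>
+
+static int set_pass_filter(int ppp_fd)
+{
+	struct sock_filter insns[] = {
+		BPF_STMT(BPF_RET | BPF_K, ~0U),	/* accept the whole packet */
+	};
+	struct sock_fprog prog = {
+		.len	= 1,
+		.filter	= insns,
+	};
+
+	return ioctl(ppp_fd, PPPIOCSPASS, &prog);
+}
+#endif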
+
+static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct ppp_file *pf;
+	struct ppp *ppp;
+	int err = -EFAULT, val, val2, i;
+	struct ppp_idle idle;
+	struct npioctl npi;
+	int unit, cflags;
+	struct slcompress *vj;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+
+	mutex_lock(&ppp_mutex);
+
+	pf = file->private_data;
+	if (!pf) {
+		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
+					   pf, file, cmd, arg);
+		goto out;
+	}
+
+	if (cmd == PPPIOCDETACH) {
+		/*
+		 * PPPIOCDETACH is no longer supported as it was heavily broken,
+		 * and is only known to have been used by pppd older than
+		 * ppp-2.4.2 (released November 2003).
+		 */
+		pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
+			     current->comm, current->pid);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (pf->kind == CHANNEL) {
+		struct channel *pch;
+		struct ppp_channel *chan;
+
+		pch = PF_TO_CHANNEL(pf);
+
+		switch (cmd) {
+		case PPPIOCCONNECT:
+			if (get_user(unit, p))
+				break;
+			err = ppp_connect_channel(pch, unit);
+			break;
+
+		case PPPIOCDISCONN:
+			err = ppp_disconnect_channel(pch);
+			break;
+
+		default:
+			down_read(&pch->chan_sem);
+			chan = pch->chan;
+			err = -ENOTTY;
+			if (chan && chan->ops->ioctl)
+				err = chan->ops->ioctl(chan, cmd, arg);
+			up_read(&pch->chan_sem);
+		}
+		goto out;
+	}
+
+	if (pf->kind != INTERFACE) {
+		/* can't happen */
+		pr_err("PPP: not interface or channel??\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	ppp = PF_TO_PPP(pf);
+	switch (cmd) {
+	case PPPIOCSMRU:
+		if (get_user(val, p))
+			break;
+		ppp->mru = val;
+		err = 0;
+		break;
+
+	case PPPIOCSFLAGS:
+		if (get_user(val, p))
+			break;
+		ppp_lock(ppp);
+		cflags = ppp->flags & ~val;
+#ifdef CONFIG_PPP_MULTILINK
+		if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
+			ppp->nextseq = 0;
+#endif
+		ppp->flags = val & SC_FLAG_BITS;
+		ppp_unlock(ppp);
+		if (cflags & SC_CCP_OPEN)
+			ppp_ccp_closed(ppp);
+		err = 0;
+		break;
+
+	case PPPIOCGFLAGS:
+		val = ppp->flags | ppp->xstate | ppp->rstate;
+		if (put_user(val, p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCSCOMPRESS:
+		err = ppp_set_compress(ppp, arg);
+		break;
+
+	case PPPIOCGUNIT:
+		if (put_user(ppp->file.index, p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCSDEBUG:
+		if (get_user(val, p))
+			break;
+		ppp->debug = val;
+		err = 0;
+		break;
+
+	case PPPIOCGDEBUG:
+		if (put_user(ppp->debug, p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGIDLE:
+		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
+		if (copy_to_user(argp, &idle, sizeof(idle)))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCSMAXCID:
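+		/* The low 16 bits of the argument give the maximum VJ
+		 * connection-ID for the compressor; the high 16 bits, if
+		 * non-zero, give the maximum connection-ID for the
+		 * decompressor (which otherwise defaults to 15). */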
+		if (get_user(val, p))
+			break;
+		val2 = 15;
+		if ((val >> 16) != 0) {
+			val2 = val >> 16;
+			val &= 0xffff;
+		}
+		vj = slhc_init(val2+1, val+1);
+		if (IS_ERR(vj)) {
+			err = PTR_ERR(vj);
+			break;
+		}
+		ppp_lock(ppp);
+		if (ppp->vj)
+			slhc_free(ppp->vj);
+		ppp->vj = vj;
+		ppp_unlock(ppp);
+		err = 0;
+		break;
+
+	case PPPIOCGNPMODE:
+	case PPPIOCSNPMODE:
+		if (copy_from_user(&npi, argp, sizeof(npi)))
+			break;
+		err = proto_to_npindex(npi.protocol);
+		if (err < 0)
+			break;
+		i = err;
+		if (cmd == PPPIOCGNPMODE) {
+			err = -EFAULT;
+			npi.mode = ppp->npmode[i];
+			if (copy_to_user(argp, &npi, sizeof(npi)))
+				break;
+		} else {
+			ppp->npmode[i] = npi.mode;
+			/* we may be able to transmit more packets now (??) */
+			netif_wake_queue(ppp->dev);
+		}
+		err = 0;
+		break;
+
+#ifdef CONFIG_PPP_FILTER
+	case PPPIOCSPASS:
+	{
+		struct sock_filter *code;
+
+		err = get_filter(argp, &code);
+		if (err >= 0) {
+			struct bpf_prog *pass_filter = NULL;
+			struct sock_fprog_kern fprog = {
+				.len = err,
+				.filter = code,
+			};
+
+			err = 0;
+			if (fprog.filter)
+				err = bpf_prog_create(&pass_filter, &fprog);
+			if (!err) {
+				ppp_lock(ppp);
+				if (ppp->pass_filter)
+					bpf_prog_destroy(ppp->pass_filter);
+				ppp->pass_filter = pass_filter;
+				ppp_unlock(ppp);
+			}
+			kfree(code);
+		}
+		break;
+	}
+	case PPPIOCSACTIVE:
+	{
+		struct sock_filter *code;
+
+		err = get_filter(argp, &code);
+		if (err >= 0) {
+			struct bpf_prog *active_filter = NULL;
+			struct sock_fprog_kern fprog = {
+				.len = err,
+				.filter = code,
+			};
+
+			err = 0;
+			if (fprog.filter)
+				err = bpf_prog_create(&active_filter, &fprog);
+			if (!err) {
+				ppp_lock(ppp);
+				if (ppp->active_filter)
+					bpf_prog_destroy(ppp->active_filter);
+				ppp->active_filter = active_filter;
+				ppp_unlock(ppp);
+			}
+			kfree(code);
+		}
+		break;
+	}
+#endif /* CONFIG_PPP_FILTER */
+
+#ifdef CONFIG_PPP_MULTILINK
+	case PPPIOCSMRRU:
+		if (get_user(val, p))
+			break;
+		ppp_recv_lock(ppp);
+		ppp->mrru = val;
+		ppp_recv_unlock(ppp);
+		err = 0;
+		break;
+#endif /* CONFIG_PPP_MULTILINK */
+
+	default:
+		err = -ENOTTY;
+	}
+
+out:
+	mutex_unlock(&ppp_mutex);
+
+	return err;
+}
+
+static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+			struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int unit, err = -EFAULT;
+	struct ppp *ppp;
+	struct channel *chan;
+	struct ppp_net *pn;
+	int __user *p = (int __user *)arg;
+
+	switch (cmd) {
+	case PPPIOCNEWUNIT:
+		/* Create a new ppp unit */
+		if (get_user(unit, p))
+			break;
+		err = ppp_create_interface(net, file, &unit);
+		if (err < 0)
+			break;
+
+		err = -EFAULT;
+		if (put_user(unit, p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCATTACH:
+		/* Attach to an existing ppp unit */
+		if (get_user(unit, p))
+			break;
+		err = -ENXIO;
+		pn = ppp_pernet(net);
+		mutex_lock(&pn->all_ppp_mutex);
+		ppp = ppp_find_unit(pn, unit);
+		if (ppp) {
+			refcount_inc(&ppp->file.refcnt);
+			file->private_data = &ppp->file;
+			err = 0;
+		}
+		mutex_unlock(&pn->all_ppp_mutex);
+		break;
+
+	case PPPIOCATTCHAN:
+		if (get_user(unit, p))
+			break;
+		err = -ENXIO;
+		pn = ppp_pernet(net);
+		spin_lock_bh(&pn->all_channels_lock);
+		chan = ppp_find_channel(pn, unit);
+		if (chan) {
+			refcount_inc(&chan->file.refcnt);
+			file->private_data = &chan->file;
+			err = 0;
+		}
+		spin_unlock_bh(&pn->all_channels_lock);
+		break;
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
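+
+/*
+ * Illustrative userspace sketch, not part of the driver: the documented
+ * way (Documentation/networking/ppp_generic.txt) to create a new unit
+ * is to open /dev/ppp and issue PPPIOCNEWUNIT, passing -1 to let the
+ * kernel pick the first free unit number.
+ */
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/ppp-ioctl.h>
+
+static int new_ppp_unit(int *unit)
+{
+	int fd = open("/dev/ppp", O_RDWR);
+
+	if (fd < 0)
+		return -1;
+	*unit = -1;				/* -1: first free unit */
+	if (ioctl(fd, PPPIOCNEWUNIT, unit) < 0) {
+		close(fd);
+		return -1;
+	}
+	return fd;	/* fd is now attached to interface unit *unit */
+}
+#endif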
+
+static const struct file_operations ppp_device_fops = {
+	.owner		= THIS_MODULE,
+	.read		= ppp_read,
+	.write		= ppp_write,
+	.poll		= ppp_poll,
+	.unlocked_ioctl	= ppp_ioctl,
+	.open		= ppp_open,
+	.release	= ppp_release,
+	.llseek		= noop_llseek,
+};
+
+static __net_init int ppp_init_net(struct net *net)
+{
+	struct ppp_net *pn = net_generic(net, ppp_net_id);
+
+	idr_init(&pn->units_idr);
+	mutex_init(&pn->all_ppp_mutex);
+
+	INIT_LIST_HEAD(&pn->all_channels);
+	INIT_LIST_HEAD(&pn->new_channels);
+
+	spin_lock_init(&pn->all_channels_lock);
+
+	return 0;
+}
+
+static __net_exit void ppp_exit_net(struct net *net)
+{
+	struct ppp_net *pn = net_generic(net, ppp_net_id);
+	struct net_device *dev;
+	struct net_device *aux;
+	struct ppp *ppp;
+	LIST_HEAD(list);
+	int id;
+
+	rtnl_lock();
+	for_each_netdev_safe(net, dev, aux) {
+		if (dev->netdev_ops == &ppp_netdev_ops)
+			unregister_netdevice_queue(dev, &list);
+	}
+
+	idr_for_each_entry(&pn->units_idr, ppp, id)
+		/* Skip devices already unregistered by previous loop */
+		if (!net_eq(dev_net(ppp->dev), net))
+			unregister_netdevice_queue(ppp->dev, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+
+	mutex_destroy(&pn->all_ppp_mutex);
+	idr_destroy(&pn->units_idr);
+	WARN_ON_ONCE(!list_empty(&pn->all_channels));
+	WARN_ON_ONCE(!list_empty(&pn->new_channels));
+}
+
+static struct pernet_operations ppp_net_ops = {
+	.init = ppp_init_net,
+	.exit = ppp_exit_net,
+	.id   = &ppp_net_id,
+	.size = sizeof(struct ppp_net),
+};
+
+static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
+{
+	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+	int ret;
+
+	mutex_lock(&pn->all_ppp_mutex);
+
+	if (unit < 0) {
+		ret = unit_get(&pn->units_idr, ppp);
+		if (ret < 0)
+			goto err;
+	} else {
+		/* Caller asked for a specific unit number. Fail with -EEXIST
+		 * if unavailable. For backward compatibility, return -EEXIST
+		 * too if idr allocation fails; this makes pppd retry without
+		 * requesting a specific unit number.
+		 */
+		if (unit_find(&pn->units_idr, unit)) {
+			ret = -EEXIST;
+			goto err;
+		}
+		ret = unit_set(&pn->units_idr, ppp, unit);
+		if (ret < 0) {
+			/* Rewrite error for backward compatibility */
+			ret = -EEXIST;
+			goto err;
+		}
+	}
+	ppp->file.index = ret;
+
+	if (!ifname_is_set)
+		snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	ret = register_netdevice(ppp->dev);
+	if (ret < 0)
+		goto err_unit;
+
+	atomic_inc(&ppp_unit_count);
+
+	return 0;
+
+err_unit:
+	mutex_lock(&pn->all_ppp_mutex);
+	unit_put(&pn->units_idr, ppp->file.index);
+err:
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	return ret;
+}
+
+static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+			     const struct ppp_config *conf)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	int indx;
+	int err;
+	int cpu;
+
+	ppp->dev = dev;
+	ppp->ppp_net = src_net;
+	ppp->mru = PPP_MRU;
+	ppp->owner = conf->file;
+
+	init_ppp_file(&ppp->file, INTERFACE);
+	ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+
+	for (indx = 0; indx < NUM_NP; ++indx)
+		ppp->npmode[indx] = NPMODE_PASS;
+	INIT_LIST_HEAD(&ppp->channels);
+	spin_lock_init(&ppp->rlock);
+	spin_lock_init(&ppp->wlock);
+
+	ppp->xmit_recursion = alloc_percpu(int);
+	if (!ppp->xmit_recursion) {
+		err = -ENOMEM;
+		goto err1;
+	}
+	for_each_possible_cpu(cpu)
+		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+
+#ifdef CONFIG_PPP_MULTILINK
+	ppp->minseq = -1;
+	skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+	ppp->pass_filter = NULL;
+	ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
+
+	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
+	if (err < 0)
+		goto err2;
+
+	conf->file->private_data = &ppp->file;
+
+	return 0;
+err2:
+	free_percpu(ppp->xmit_recursion);
+err1:
+	return err;
+}
+
+static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
+	[IFLA_PPP_DEV_FD]	= { .type = NLA_S32 },
+};
+
+static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
+			   struct netlink_ext_ack *extack)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (!data[IFLA_PPP_DEV_FD])
+		return -EINVAL;
+	if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
+		return -EBADF;
+
+	return 0;
+}
+
+static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[],
+			  struct netlink_ext_ack *extack)
+{
+	struct ppp_config conf = {
+		.unit = -1,
+		.ifname_is_set = true,
+	};
+	struct file *file;
+	int err;
+
+	file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
+	if (!file)
+		return -EBADF;
+
+	/* rtnl_lock is already held here, but ppp_create_interface() locks
+	 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
+	 * possible deadlock due to lock order inversion, at the cost of
+	 * pushing the problem back to userspace.
+	 */
+	if (!mutex_trylock(&ppp_mutex)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (file->f_op != &ppp_device_fops || file->private_data) {
+		err = -EBADF;
+		goto out_unlock;
+	}
+
+	conf.file = file;
+
+	/* Don't use device name generated by the rtnetlink layer when ifname
+	 * isn't specified. Let ppp_dev_configure() set the device name using
+	 * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
+	 * userspace to infer the device name using the PPPIOCGUNIT ioctl.
+	 */
+	if (!tb[IFLA_IFNAME])
+		conf.ifname_is_set = false;
+
+	err = ppp_dev_configure(src_net, dev, &conf);
+
+out_unlock:
+	mutex_unlock(&ppp_mutex);
+out:
+	fput(file);
+
+	return err;
+}
+
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
+{
+	unregister_netdevice_queue(dev, head);
+}
+
+static size_t ppp_nl_get_size(const struct net_device *dev)
+{
+	return 0;
+}
+
+static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	return 0;
+}
+
+static struct net *ppp_nl_get_link_net(const struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+
+	return ppp->ppp_net;
+}
+
+static struct rtnl_link_ops ppp_link_ops __read_mostly = {
+	.kind		= "ppp",
+	.maxtype	= IFLA_PPP_MAX,
+	.policy		= ppp_nl_policy,
+	.priv_size	= sizeof(struct ppp),
+	.setup		= ppp_setup,
+	.validate	= ppp_nl_validate,
+	.newlink	= ppp_nl_newlink,
+	.dellink	= ppp_nl_dellink,
+	.get_size	= ppp_nl_get_size,
+	.fill_info	= ppp_nl_fill_info,
+	.get_link_net	= ppp_nl_get_link_net,
+};
+
+#define PPP_MAJOR	108
+
+/* Called at boot time if ppp is compiled into the kernel,
+   or at module load time (from init_module) if compiled as a module. */
+static int __init ppp_init(void)
+{
+	int err;
+
+	pr_info("PPP generic driver version " PPP_VERSION "\n");
+
+	err = register_pernet_device(&ppp_net_ops);
+	if (err) {
+		pr_err("failed to register PPP pernet device (%d)\n", err);
+		goto out;
+	}
+
+	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
+	if (err) {
+		pr_err("failed to register PPP device (%d)\n", err);
+		goto out_net;
+	}
+
+	ppp_class = class_create(THIS_MODULE, "ppp");
+	if (IS_ERR(ppp_class)) {
+		err = PTR_ERR(ppp_class);
+		goto out_chrdev;
+	}
+
+	err = rtnl_link_register(&ppp_link_ops);
+	if (err) {
+		pr_err("failed to register rtnetlink PPP handler\n");
+		goto out_class;
+	}
+
+	/* not a big deal if we fail here :-) */
+	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+
+	return 0;
+
+out_class:
+	class_destroy(ppp_class);
+out_chrdev:
+	unregister_chrdev(PPP_MAJOR, "ppp");
+out_net:
+	unregister_pernet_device(&ppp_net_ops);
+out:
+	return err;
+}
+
+/*
+ * Network interface unit routines.
+ */
+static netdev_tx_t
+ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	int npi, proto;
+	unsigned char *pp;
+
+	npi = ethertype_to_npindex(ntohs(skb->protocol));
+	if (npi < 0)
+		goto outf;
+
+	/* Drop, accept or reject the packet */
+	switch (ppp->npmode[npi]) {
+	case NPMODE_PASS:
+		break;
+	case NPMODE_QUEUE:
+		/* it would be nice to have a way to tell the network
+		   system to queue this one up for later. */
+		goto outf;
+	case NPMODE_DROP:
+	case NPMODE_ERROR:
+		goto outf;
+	}
+
+	/* Put the 2-byte PPP protocol number on the front,
+	   making sure there is room for the address and control fields. */
+	if (skb_cow_head(skb, PPP_HDRLEN))
+		goto outf;
+
+	pp = skb_push(skb, 2);
+	proto = npindex_to_proto[npi];
+	put_unaligned_be16(proto, pp);
+
+	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
+	ppp_xmit_process(ppp, skb);
+
+	return NETDEV_TX_OK;
+
+ outf:
+	kfree_skb(skb);
+	++dev->stats.tx_dropped;
+	return NETDEV_TX_OK;
+}
+
+static int
+ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	int err = -EFAULT;
+	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+	struct ppp_stats stats;
+	struct ppp_comp_stats cstats;
+	char *vers;
+
+	switch (cmd) {
+	case SIOCGPPPSTATS:
+		ppp_get_stats(ppp, &stats);
+		if (copy_to_user(addr, &stats, sizeof(stats)))
+			break;
+		err = 0;
+		break;
+
+	case SIOCGPPPCSTATS:
+		memset(&cstats, 0, sizeof(cstats));
+		if (ppp->xc_state)
+			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
+		if (ppp->rc_state)
+			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
+		if (copy_to_user(addr, &cstats, sizeof(cstats)))
+			break;
+		err = 0;
+		break;
+
+	case SIOCGPPPVER:
+		vers = PPP_VERSION;
+		if (copy_to_user(addr, vers, strlen(vers) + 1))
+			break;
+		err = 0;
+		break;
+
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static void
+ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+{
+	struct ppp *ppp = netdev_priv(dev);
+
+	ppp_recv_lock(ppp);
+	stats64->rx_packets = ppp->stats64.rx_packets;
+	stats64->rx_bytes   = ppp->stats64.rx_bytes;
+	ppp_recv_unlock(ppp);
+
+	ppp_xmit_lock(ppp);
+	stats64->tx_packets = ppp->stats64.tx_packets;
+	stats64->tx_bytes   = ppp->stats64.tx_bytes;
+	ppp_xmit_unlock(ppp);
+
+	stats64->rx_errors        = dev->stats.rx_errors;
+	stats64->tx_errors        = dev->stats.tx_errors;
+	stats64->rx_dropped       = dev->stats.rx_dropped;
+	stats64->tx_dropped       = dev->stats.tx_dropped;
+	stats64->rx_length_errors = dev->stats.rx_length_errors;
+}
+
+static int ppp_dev_init(struct net_device *dev)
+{
+	struct ppp *ppp;
+
+	netdev_lockdep_set_classes(dev);
+
+	ppp = netdev_priv(dev);
+	/* Let the netdevice take a reference on the ppp file. This ensures
+	 * that ppp_destroy_interface() won't run before the device gets
+	 * unregistered.
+	 */
+	refcount_inc(&ppp->file.refcnt);
+
+	return 0;
+}
+
+static void ppp_dev_uninit(struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+	ppp_lock(ppp);
+	ppp->closing = 1;
+	ppp_unlock(ppp);
+
+	mutex_lock(&pn->all_ppp_mutex);
+	unit_put(&pn->units_idr, ppp->file.index);
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	ppp->owner = NULL;
+
+	ppp->file.dead = 1;
+	wake_up_interruptible(&ppp->file.rwait);
+}
+
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+	struct ppp *ppp;
+
+	ppp = netdev_priv(dev);
+	if (refcount_dec_and_test(&ppp->file.refcnt))
+		ppp_destroy_interface(ppp);
+}
+
+static const struct net_device_ops ppp_netdev_ops = {
+	.ndo_init	 = ppp_dev_init,
+	.ndo_uninit      = ppp_dev_uninit,
+	.ndo_start_xmit  = ppp_start_xmit,
+	.ndo_do_ioctl    = ppp_net_ioctl,
+	.ndo_get_stats64 = ppp_get_stats64,
+};
+
+static struct device_type ppp_type = {
+	.name = "ppp",
+};
+
+static void ppp_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &ppp_netdev_ops;
+	SET_NETDEV_DEVTYPE(dev, &ppp_type);
+
+	dev->features |= NETIF_F_LLTX;
+
+	dev->hard_header_len = PPP_HDRLEN;
+	dev->mtu = PPP_MRU;
+	dev->addr_len = 0;
+	dev->tx_queue_len = 3;
+	dev->type = ARPHRD_PPP;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+	dev->priv_destructor = ppp_dev_priv_destructor;
+	netif_keep_dst(dev);
+}
+
+/*
+ * Transmit-side routines.
+ */
+
+/* Called to do any work queued up on the transmit side that can now be done */
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+{
+	ppp_xmit_lock(ppp);
+	if (!ppp->closing) {
+		ppp_push(ppp);
+
+		if (skb)
+			skb_queue_tail(&ppp->file.xq, skb);
+		while (!ppp->xmit_pending &&
+		       (skb = skb_dequeue(&ppp->file.xq)))
+			ppp_send_frame(ppp, skb);
+		/* If there's no work left to do, tell the core net
+		   code that we can accept some more. */
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+			netif_wake_queue(ppp->dev);
+		else
+			netif_stop_queue(ppp->dev);
+	}
+	ppp_xmit_unlock(ppp);
+}
+
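+/* The per-CPU xmit_recursion counter guards the transmit path against
+ * re-entry, which can happen when a PPP unit is stacked, directly or
+ * indirectly, on top of another PPP transmit path on the same CPU.
+ * Instead of recursing (and deadlocking on the xmit lock), the packet
+ * is dropped and a rate-limited error is logged.
+ */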
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+{
+	local_bh_disable();
+
+	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
+		goto err;
+
+	(*this_cpu_ptr(ppp->xmit_recursion))++;
+	__ppp_xmit_process(ppp, skb);
+	(*this_cpu_ptr(ppp->xmit_recursion))--;
+
+	local_bh_enable();
+
+	return;
+
+err:
+	local_bh_enable();
+
+	kfree_skb(skb);
+
+	if (net_ratelimit())
+		netdev_err(ppp->dev, "recursion detected\n");
+}
+
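+/* Run the transmit compressor over an skb.  Returns a new skb holding the
+ * compressed frame when compression succeeded and CCP is up, the original
+ * skb when the data did not compress, or NULL (also on allocation failure)
+ * when the compressor demands that the frame be dropped, as MPPE does
+ * rather than send plaintext.
+ */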
+static inline struct sk_buff *
+pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
+{
+	struct sk_buff *new_skb;
+	int len;
+	int new_skb_size = ppp->dev->mtu +
+		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
+	int compressor_skb_size = ppp->dev->mtu +
+		ppp->xcomp->comp_extra + PPP_HDRLEN;
+	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
+	if (!new_skb) {
+		if (net_ratelimit())
+			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
+		return NULL;
+	}
+	if (ppp->dev->hard_header_len > PPP_HDRLEN)
+		skb_reserve(new_skb,
+			    ppp->dev->hard_header_len - PPP_HDRLEN);
+
+	/* compressor still expects A/C bytes in hdr */
+	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
+				   new_skb->data, skb->len + 2,
+				   compressor_skb_size);
+	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
+		consume_skb(skb);
+		skb = new_skb;
+		skb_put(skb, len);
+		skb_pull(skb, 2);	/* pull off A/C bytes */
+	} else if (len == 0) {
+		/* didn't compress, or CCP not up yet */
+		consume_skb(new_skb);
+		new_skb = skb;
+	} else {
+		/*
+		 * (len < 0)
+		 * MPPE requires that we do not send unencrypted
+		 * frames.  The compressor will return -1 if we
+		 * should drop the frame.  We cannot simply test
+		 * the compress_proto because MPPE and MPPC share
+		 * the same number.
+		 */
+		if (net_ratelimit())
+			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
+		kfree_skb(skb);
+		consume_skb(new_skb);
+		new_skb = NULL;
+	}
+	return new_skb;
+}
+
+/*
+ * Compress and send a frame.
+ * The caller should have locked the xmit path,
+ * and xmit_pending should be 0.
+ */
+static void
+ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+	int proto = PPP_PROTO(skb);
+	struct sk_buff *new_skb;
+	int len;
+	unsigned char *cp;
+
+	if (proto < 0x8000) {
+#ifdef CONFIG_PPP_FILTER
+		/* check if we should pass this packet */
+		/* the filter instructions are constructed assuming
+		   a four-byte PPP header on each packet */
+		*(u8 *)skb_push(skb, 2) = 1;
+		if (ppp->pass_filter &&
+		    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
+			if (ppp->debug & 1)
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "PPP: outbound frame "
+					      "not passed\n");
+			kfree_skb(skb);
+			return;
+		}
+		/* if this packet passes the active filter, record the time */
+		if (!(ppp->active_filter &&
+		      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
+			ppp->last_xmit = jiffies;
+		skb_pull(skb, 2);
+#else
+		/* for data packets, record the time */
+		ppp->last_xmit = jiffies;
+#endif /* CONFIG_PPP_FILTER */
+	}
+
+	++ppp->stats64.tx_packets;
+	ppp->stats64.tx_bytes += skb->len - 2;
+
+	switch (proto) {
+	case PPP_IP:
+		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
+			break;
+		/* try to do VJ TCP header compression */
+		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
+				    GFP_ATOMIC);
+		if (!new_skb) {
+			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
+			goto drop;
+		}
+		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
+		cp = skb->data + 2;
+		len = slhc_compress(ppp->vj, cp, skb->len - 2,
+				    new_skb->data + 2, &cp,
+				    !(ppp->flags & SC_NO_TCP_CCID));
+		if (cp == skb->data + 2) {
+			/* didn't compress */
+			consume_skb(new_skb);
+		} else {
+			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
+				proto = PPP_VJC_COMP;
+				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
+			} else {
+				proto = PPP_VJC_UNCOMP;
+				cp[0] = skb->data[2];
+			}
+			consume_skb(skb);
+			skb = new_skb;
+			cp = skb_put(skb, len + 2);
+			cp[0] = 0;
+			cp[1] = proto;
+		}
+		break;
+
+	case PPP_CCP:
+		/* peek at outbound CCP frames */
+		ppp_ccp_peek(ppp, skb, 0);
+		break;
+	}
+
+	/* try to do packet compression */
+	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
+	    proto != PPP_LCP && proto != PPP_CCP) {
+		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
+			if (net_ratelimit())
+				netdev_err(ppp->dev,
+					   "ppp: compression required but "
+					   "down - pkt dropped.\n");
+			goto drop;
+		}
+		skb = pad_compress_skb(ppp, skb);
+		if (!skb)
+			goto drop;
+	}
+
+	/*
+	 * If we are waiting for traffic (demand dialling),
+	 * queue it up for pppd to receive.
+	 */
+	if (ppp->flags & SC_LOOP_TRAFFIC) {
+		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
+			goto drop;
+		skb_queue_tail(&ppp->file.rq, skb);
+		wake_up_interruptible(&ppp->file.rwait);
+		return;
+	}
+
+	ppp->xmit_pending = skb;
+	ppp_push(ppp);
+	return;
+
+ drop:
+	kfree_skb(skb);
+	++ppp->dev->stats.tx_errors;
+}
+
+/*
+ * Try to send the frame in xmit_pending.
+ * The caller should have the xmit path locked.
+ */
+static void
+ppp_push(struct ppp *ppp)
+{
+	struct list_head *list;
+	struct channel *pch;
+	struct sk_buff *skb = ppp->xmit_pending;
+
+	if (!skb)
+		return;
+
+	list = &ppp->channels;
+	if (list_empty(list)) {
+		/* nowhere to send the packet, just drop it */
+		ppp->xmit_pending = NULL;
+		kfree_skb(skb);
+		return;
+	}
+
+	if ((ppp->flags & SC_MULTILINK) == 0) {
+		/* not doing multilink: send it down the first channel */
+		list = list->next;
+		pch = list_entry(list, struct channel, clist);
+
+		spin_lock(&pch->downl);
+		if (pch->chan) {
+			if (pch->chan->ops->start_xmit(pch->chan, skb))
+				ppp->xmit_pending = NULL;
+		} else {
+			/* channel got unregistered */
+			kfree_skb(skb);
+			ppp->xmit_pending = NULL;
+		}
+		spin_unlock(&pch->downl);
+		return;
+	}
+
+#ifdef CONFIG_PPP_MULTILINK
+	/* Multilink: fragment the packet over as many links
+	   as can take the packet at the moment. */
+	if (!ppp_mp_explode(ppp, skb))
+		return;
+#endif /* CONFIG_PPP_MULTILINK */
+
+	ppp->xmit_pending = NULL;
+	kfree_skb(skb);
+}
+
+#ifdef CONFIG_PPP_MULTILINK
+static bool mp_protocol_compress __read_mostly = true;
+module_param(mp_protocol_compress, bool, 0644);
+MODULE_PARM_DESC(mp_protocol_compress,
+		 "compress protocol id in multilink fragments");
+
+/*
+ * Divide a packet to be transmitted into fragments and
+ * send them out the individual links.
+ */
+static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
+{
+	int len, totlen;
+	int i, bits, hdrlen, mtu;
+	int flen;
+	int navail, nfree, nzero;
+	int nbigger;
+	int totspeed;
+	int totfree;
+	unsigned char *p, *q;
+	struct list_head *list;
+	struct channel *pch;
+	struct sk_buff *frag;
+	struct ppp_channel *chan;
+
+	totspeed = 0; /* total bitrate of the bundle */
+	nfree = 0; /* # channels which have no packet already queued */
+	navail = 0; /* total # of usable channels (not deregistered) */
+	nzero = 0; /* number of channels with zero speed associated */
+	totfree = 0; /* total # of channels available and having no queued
+		      * packets before starting the fragmentation */
+
+	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
+	i = 0;
+	list_for_each_entry(pch, &ppp->channels, clist) {
+		if (pch->chan) {
+			pch->avail = 1;
+			navail++;
+			pch->speed = pch->chan->speed;
+		} else {
+			pch->avail = 0;
+		}
+		if (pch->avail) {
+			if (skb_queue_empty(&pch->file.xq) ||
+			    !pch->had_frag) {
+				if (pch->speed == 0)
+					nzero++;
+				else
+					totspeed += pch->speed;
+
+				pch->avail = 2;
+				++nfree;
+				++totfree;
+			}
+			if (!pch->had_frag && i < ppp->nxchan)
+				ppp->nxchan = i;
+		}
+		++i;
+	}
+	/*
+	 * Don't start sending this packet unless at least half of
+	 * the channels are free.  This gives much better TCP
+	 * performance if we have a lot of channels.
+	 */
+	if (nfree == 0 || nfree < navail / 2)
+		return 0; /* can't take now, leave it in xmit_pending */
+
+	/* Do protocol field compression */
+	p = skb->data;
+	len = skb->len;
+	if (*p == 0 && mp_protocol_compress) {
+		++p;
+		--len;
+	}
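+	/* e.g. IP has protocol 0x0021: the leading 0x00 octet is dropped and
+	 * only 0x21 is carried (protocol field compression, cf. RFC 1661).
+	 */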
+
+	totlen = len;
+	nbigger = len % nfree;
+
+	/* skip to the channel after the one we last used
+	   and start at that one */
+	list = &ppp->channels;
+	for (i = 0; i < ppp->nxchan; ++i) {
+		list = list->next;
+		if (list == &ppp->channels) {
+			i = 0;
+			break;
+		}
+	}
+
+	/* create a fragment for each channel */
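+	/* the first fragment of each packet carries the MP Begin (B) bit */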
+	bits = B;
+	while (len > 0) {
+		list = list->next;
+		if (list == &ppp->channels) {
+			i = 0;
+			continue;
+		}
+		pch = list_entry(list, struct channel, clist);
+		++i;
+		if (!pch->avail)
+			continue;
+
+		/*
+		 * Skip this channel if it has a fragment pending already and
+		 * we haven't given a fragment to all of the free channels.
+		 */
+		if (pch->avail == 1) {
+			if (nfree > 0)
+				continue;
+		} else {
+			pch->avail = 1;
+		}
+
+		/* check the channel's mtu and whether it is still attached. */
+		spin_lock(&pch->downl);
+		if (pch->chan == NULL) {
+			/* can't use this channel, it's being deregistered */
+			if (pch->speed == 0)
+				nzero--;
+			else
+				totspeed -= pch->speed;
+
+			spin_unlock(&pch->downl);
+			pch->avail = 0;
+			totlen = len;
+			totfree--;
+			nfree--;
+			if (--navail == 0)
+				break;
+			continue;
+		}
+
+		/*
+		 * If the channel speed is not set, divide the packet evenly
+		 * among the free channels; otherwise divide it according to
+		 * the speed of the channel we are going to transmit on.
+		 */
+		flen = len;
+		if (nfree > 0) {
+			if (pch->speed == 0) {
+				flen = len/nfree;
+				if (nbigger > 0) {
+					flen++;
+					nbigger--;
+				}
+			} else {
+				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
+					((totspeed*totfree)/pch->speed)) - hdrlen;
+				if (nbigger > 0) {
+					flen += ((totfree - nzero)*pch->speed)/totspeed;
+					nbigger -= ((totfree - nzero)*pch->speed)/
+							totspeed;
+				}
+			}
+			nfree--;
+		}
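+		/* In exact arithmetic, and with nzero == 0, the speed-based
+		 * expression above reduces to
+		 *   flen = (speed/totspeed) * (totlen + hdrlen*totfree) - hdrlen
+		 * i.e. each free channel gets a share of the payload (plus
+		 * the per-fragment header cost) proportional to its speed.
+		 */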
+
+		/*
+		 * Check if we are on the last channel or we exceeded
+		 * the length of the data to fragment.
+		 */
+		if ((nfree <= 0) || (flen > len))
+			flen = len;
+		/*
+		 * It is not worth transmitting on slow channels: in that
+		 * case the flen resulting from the above formula will be
+		 * zero or less.  Skip the channel in this case.
+		 */
+		if (flen <= 0) {
+			pch->avail = 2;
+			spin_unlock(&pch->downl);
+			continue;
+		}
+
+		/*
+		 * hdrlen includes the 2-byte PPP protocol field, but the
+		 * MTU counts only the payload excluding the protocol field.
+		 * (RFC1661 Section 2)
+		 */
+		mtu = pch->chan->mtu - (hdrlen - 2);
+		if (mtu < 4)
+			mtu = 4;
+		if (flen > mtu)
+			flen = mtu;
+		if (flen == len)
+			bits |= E;
+		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+		if (!frag)
+			goto noskb;
+		q = skb_put(frag, flen + hdrlen);
+
+		/* make the MP header */
+		put_unaligned_be16(PPP_MP, q);
+		if (ppp->flags & SC_MP_XSHORTSEQ) {
+			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+			q[3] = ppp->nxseq;
+		} else {
+			q[2] = bits;
+			q[3] = ppp->nxseq >> 16;
+			q[4] = ppp->nxseq >> 8;
+			q[5] = ppp->nxseq;
+		}
+
+		memcpy(q + hdrlen, p, flen);
+
+		/* try to send it down the channel */
+		chan = pch->chan;
+		if (!skb_queue_empty(&pch->file.xq) ||
+			!chan->ops->start_xmit(chan, frag))
+			skb_queue_tail(&pch->file.xq, frag);
+		pch->had_frag = 1;
+		p += flen;
+		len -= flen;
+		++ppp->nxseq;
+		bits = 0;
+		spin_unlock(&pch->downl);
+	}
+	ppp->nxchan = i;
+
+	return 1;
+
+ noskb:
+	spin_unlock(&pch->downl);
+	if (ppp->debug & 1)
+		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
+	++ppp->dev->stats.tx_errors;
+	++ppp->nxseq;
+	return 1;	/* abandon the frame */
+}
+#endif /* CONFIG_PPP_MULTILINK */
+
+/* Try to send data out on a channel */
+static void __ppp_channel_push(struct channel *pch)
+{
+	struct sk_buff *skb;
+	struct ppp *ppp;
+
+	spin_lock(&pch->downl);
+	if (pch->chan) {
+		while (!skb_queue_empty(&pch->file.xq)) {
+			skb = skb_dequeue(&pch->file.xq);
+			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
+				/* put the packet back and try again later */
+				skb_queue_head(&pch->file.xq, skb);
+				break;
+			}
+		}
+	} else {
+		/* channel got deregistered */
+		skb_queue_purge(&pch->file.xq);
+	}
+	spin_unlock(&pch->downl);
+	/* see if there is anything from the attached unit to be sent */
+	if (skb_queue_empty(&pch->file.xq)) {
+		ppp = pch->ppp;
+		if (ppp)
+			__ppp_xmit_process(ppp, NULL);
+	}
+}
+
+static void ppp_channel_push(struct channel *pch)
+{
+	read_lock_bh(&pch->upl);
+	if (pch->ppp) {
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+		__ppp_channel_push(pch);
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+	} else {
+		__ppp_channel_push(pch);
+	}
+	read_unlock_bh(&pch->upl);
+}
+
+/*
+ * Receive-side routines.
+ */
+
+struct ppp_mp_skb_parm {
+	u32		sequence;
+	u8		BEbits;
+};
+#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
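+/* While a fragment sits on the reconstruction queue, its expanded 32-bit
+ * sequence number and B/E (begin/end) bits live in the skb control buffer.
+ */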
+
+static inline void
+ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+	ppp_recv_lock(ppp);
+	if (!ppp->closing)
+		ppp_receive_frame(ppp, skb, pch);
+	else
+		kfree_skb(skb);
+	ppp_recv_unlock(ppp);
+}
+
+void
+ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct channel *pch = chan->ppp;
+	int proto;
+
+	if (!pch) {
+		kfree_skb(skb);
+		return;
+	}
+
+	read_lock_bh(&pch->upl);
+	if (!pskb_may_pull(skb, 2)) {
+		kfree_skb(skb);
+		if (pch->ppp) {
+			++pch->ppp->dev->stats.rx_length_errors;
+			ppp_receive_error(pch->ppp);
+		}
+		goto done;
+	}
+
+	proto = PPP_PROTO(skb);
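+	/* Protocol numbers >= 0xc000 are link control traffic (LCP 0xc021,
+	 * PAP 0xc023, CHAP 0xc223, ...) which pppd handles in userspace.
+	 */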
+	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+		/* put it on the channel queue */
+		skb_queue_tail(&pch->file.rq, skb);
+		/* drop old frames if queue too long */
+		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
+		       (skb = skb_dequeue(&pch->file.rq)))
+			kfree_skb(skb);
+		wake_up_interruptible(&pch->file.rwait);
+	} else {
+		ppp_do_recv(pch->ppp, skb, pch);
+	}
+
+done:
+	read_unlock_bh(&pch->upl);
+}
+
+/* Put a 0-length skb in the receive queue as an error indication */
+void
+ppp_input_error(struct ppp_channel *chan, int code)
+{
+	struct channel *pch = chan->ppp;
+	struct sk_buff *skb;
+
+	if (!pch)
+		return;
+
+	read_lock_bh(&pch->upl);
+	if (pch->ppp) {
+		skb = alloc_skb(0, GFP_ATOMIC);
+		if (skb) {
+			skb->len = 0;		/* probably unnecessary */
+			skb->cb[0] = code;
+			ppp_do_recv(pch->ppp, skb, pch);
+		}
+	}
+	read_unlock_bh(&pch->upl);
+}
+
+/*
+ * We come in here to process a received frame.
+ * The receive side of the ppp unit is locked.
+ */
+static void
+ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+	/* note: a 0-length skb is used as an error indication */
+	if (skb->len > 0) {
+		skb_checksum_complete_unset(skb);
+#ifdef CONFIG_PPP_MULTILINK
+		/* XXX do channel-level decompression here */
+		if (PPP_PROTO(skb) == PPP_MP)
+			ppp_receive_mp_frame(ppp, skb, pch);
+		else
+#endif /* CONFIG_PPP_MULTILINK */
+			ppp_receive_nonmp_frame(ppp, skb);
+	} else {
+		kfree_skb(skb);
+		ppp_receive_error(ppp);
+	}
+}
+
+static void
+ppp_receive_error(struct ppp *ppp)
+{
+	++ppp->dev->stats.rx_errors;
+	if (ppp->vj)
+		slhc_toss(ppp->vj);
+}
+
+static void
+ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+	struct sk_buff *ns;
+	int proto, len, npi;
+
+	/*
+	 * Decompress the frame, if compressed.
+	 * Note that some decompressors need to see uncompressed frames
+	 * that come in as well as compressed frames.
+	 */
+	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
+	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
+		skb = ppp_decompress_frame(ppp, skb);
+
+	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
+		goto err;
+
+	proto = PPP_PROTO(skb);
+	switch (proto) {
+	case PPP_VJC_COMP:
+		/* decompress VJ compressed packets */
+		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
+			goto err;
+
+		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
+			/* copy to a new sk_buff with more tailroom */
+			ns = dev_alloc_skb(skb->len + 128);
+			if (!ns) {
+				netdev_err(ppp->dev, "PPP: no memory "
+					   "(VJ decomp)\n");
+				goto err;
+			}
+			skb_reserve(ns, 2);
+			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
+			consume_skb(skb);
+			skb = ns;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
+		if (len <= 0) {
+			netdev_printk(KERN_DEBUG, ppp->dev,
+				      "PPP: VJ decompression error\n");
+			goto err;
+		}
+		len += 2;
+		if (len > skb->len)
+			skb_put(skb, len - skb->len);
+		else if (len < skb->len)
+			skb_trim(skb, len);
+		proto = PPP_IP;
+		break;
+
+	case PPP_VJC_UNCOMP:
+		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
+			goto err;
+
+		/* Until we fix the decompressor, we need to make sure
+		 * the data portion is linear.
+		 */
+		if (!pskb_may_pull(skb, skb->len))
+			goto err;
+
+		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
+			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
+			goto err;
+		}
+		proto = PPP_IP;
+		break;
+
+	case PPP_CCP:
+		ppp_ccp_peek(ppp, skb, 1);
+		break;
+	}
+
+	++ppp->stats64.rx_packets;
+	ppp->stats64.rx_bytes += skb->len - 2;
+
+	npi = proto_to_npindex(proto);
+	if (npi < 0) {
+		/* control or unknown frame - pass it to pppd */
+		skb_queue_tail(&ppp->file.rq, skb);
+		/* limit queue length by dropping old frames */
+		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
+		       (skb = skb_dequeue(&ppp->file.rq)))
+			kfree_skb(skb);
+		/* wake up any process polling or blocking on read */
+		wake_up_interruptible(&ppp->file.rwait);
+
+	} else {
+		/* network protocol frame - give it to the kernel */
+
+#ifdef CONFIG_PPP_FILTER
+		/* check if the packet passes the pass and active filters */
+		/* the filter instructions are constructed assuming
+		   a four-byte PPP header on each packet */
+		if (ppp->pass_filter || ppp->active_filter) {
+			if (skb_unclone(skb, GFP_ATOMIC))
+				goto err;
+
+			*(u8 *)skb_push(skb, 2) = 0;
+			if (ppp->pass_filter &&
+			    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
+				if (ppp->debug & 1)
+					netdev_printk(KERN_DEBUG, ppp->dev,
+						      "PPP: inbound frame "
+						      "not passed\n");
+				kfree_skb(skb);
+				return;
+			}
+			if (!(ppp->active_filter &&
+			      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
+				ppp->last_recv = jiffies;
+			__skb_pull(skb, 2);
+		} else
+#endif /* CONFIG_PPP_FILTER */
+			ppp->last_recv = jiffies;
+
+		if ((ppp->dev->flags & IFF_UP) == 0 ||
+		    ppp->npmode[npi] != NPMODE_PASS) {
+			kfree_skb(skb);
+		} else {
+			/* chop off protocol */
+			skb_pull_rcsum(skb, 2);
+			skb->dev = ppp->dev;
+			skb->protocol = htons(npindex_to_ethertype[npi]);
+			skb_reset_mac_header(skb);
+			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
+						      dev_net(ppp->dev)));
+			netif_rx(skb);
+		}
+	}
+	return;
+
+ err:
+	kfree_skb(skb);
+	ppp_receive_error(ppp);
+}
+
+static struct sk_buff *
+ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+	int proto = PPP_PROTO(skb);
+	struct sk_buff *ns;
+	int len;
+
+	/* Until we fix all the decompressors, we need to make sure
+	 * the data portion is linear.
+	 */
+	if (!pskb_may_pull(skb, skb->len))
+		goto err;
+
+	if (proto == PPP_COMP) {
+		int obuff_size;
+
+		switch(ppp->rcomp->compress_proto) {
+		case CI_MPPE:
+			obuff_size = ppp->mru + PPP_HDRLEN + 1;
+			break;
+		default:
+			obuff_size = ppp->mru + PPP_HDRLEN;
+			break;
+		}
+
+		ns = dev_alloc_skb(obuff_size);
+		if (!ns) {
+			netdev_err(ppp->dev, "ppp_decompress_frame: "
+				   "no memory\n");
+			goto err;
+		}
+		/* the decompressor still expects the A/C bytes in the hdr */
+		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
+				skb->len + 2, ns->data, obuff_size);
+		if (len < 0) {
+			/* Pass the compressed frame to pppd as an
+			   error indication. */
+			if (len == DECOMP_FATALERROR)
+				ppp->rstate |= SC_DC_FERROR;
+			kfree_skb(ns);
+			goto err;
+		}
+
+		consume_skb(skb);
+		skb = ns;
+		skb_put(skb, len);
+		skb_pull(skb, 2);	/* pull off the A/C bytes */
+
+	} else {
+		/* Uncompressed frame - pass to decompressor so it
+		   can update its dictionary if necessary. */
+		if (ppp->rcomp->incomp)
+			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
+					   skb->len + 2);
+	}
+
+	return skb;
+
+ err:
+	ppp->rstate |= SC_DC_ERROR;
+	ppp_receive_error(ppp);
+	return skb;
+}
+
+#ifdef CONFIG_PPP_MULTILINK
+/*
+ * Receive a multilink frame.
+ * We put it on the reconstruction queue and then pull off
+ * as many completed frames as we can.
+ */
+static void
+ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+	u32 mask, seq;
+	struct channel *ch;
+	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
+
+	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
+		goto err;		/* no good, throw it away */
+
+	/* Decode sequence number and begin/end bits */
+	if (ppp->flags & SC_MP_SHORTSEQ) {
+		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
+		mask = 0xfff;
+	} else {
+		seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
+		mask = 0xffffff;
+	}
+	PPP_MP_CB(skb)->BEbits = skb->data[2];
+	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
+
+	/*
+	 * Do protocol ID decompression on the first fragment of each packet.
+	 */
+	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
+		*(u8 *)skb_push(skb, 1) = 0;
+
+	/*
+	 * Expand sequence number to 32 bits, making it as close
+	 * as possible to ppp->minseq.
+	 */
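+	/* For instance, with short sequence numbers (mask 0xfff), if
+	 * ppp->minseq == 0x10ffe and the received low bits are 0x003, the
+	 * first guess is 0x10003; since 0x10ffe - 0x10003 > 0x7ff we add
+	 * 0x1000 and get 0x11003, the 32-bit value nearest to minseq.
+	 */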
+	seq |= ppp->minseq & ~mask;
+	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
+		seq += mask + 1;
+	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
+		seq -= mask + 1;	/* should never happen */
+	PPP_MP_CB(skb)->sequence = seq;
+	pch->lastseq = seq;
+
+	/*
+	 * If this packet comes before the next one we were expecting,
+	 * drop it.
+	 */
+	if (seq_before(seq, ppp->nextseq)) {
+		kfree_skb(skb);
+		++ppp->dev->stats.rx_dropped;
+		ppp_receive_error(ppp);
+		return;
+	}
+
+	/*
+	 * Reevaluate minseq, the minimum over all channels of the
+	 * last sequence number received on each channel.  Because of
+	 * the increasing sequence number rule, we know that any fragment
+	 * before `minseq' which hasn't arrived is never going to arrive.
+	 * The list of channels can't change because we have the receive
+	 * side of the ppp unit locked.
+	 */
+	list_for_each_entry(ch, &ppp->channels, clist) {
+		if (seq_before(ch->lastseq, seq))
+			seq = ch->lastseq;
+	}
+	if (seq_before(ppp->minseq, seq))
+		ppp->minseq = seq;
+
+	/* Put the fragment on the reconstruction queue */
+	ppp_mp_insert(ppp, skb);
+
+	/* If the queue is getting long, don't wait any longer for packets
+	   before the start of the queue. */
+	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
+		struct sk_buff *mskb = skb_peek(&ppp->mrq);
+		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
+			ppp->minseq = PPP_MP_CB(mskb)->sequence;
+	}
+
+	/* Pull completed packets off the queue and receive them. */
+	while ((skb = ppp_mp_reconstruct(ppp))) {
+		if (pskb_may_pull(skb, 2))
+			ppp_receive_nonmp_frame(ppp, skb);
+		else {
+			++ppp->dev->stats.rx_length_errors;
+			kfree_skb(skb);
+			ppp_receive_error(ppp);
+		}
+	}
+
+	return;
+
+ err:
+	kfree_skb(skb);
+	ppp_receive_error(ppp);
+}
+
+/*
+ * Insert a fragment on the MP reconstruction queue.
+ * The queue is ordered by increasing sequence number.
+ */
+static void
+ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+	struct sk_buff_head *list = &ppp->mrq;
+	u32 seq = PPP_MP_CB(skb)->sequence;
+
+	/* N.B. we don't need to lock the list lock because we have the
+	   ppp unit receive-side lock. */
+	skb_queue_walk(list, p) {
+		if (seq_before(seq, PPP_MP_CB(p)->sequence))
+			break;
+	}
+	__skb_queue_before(list, p, skb);
+}
+
+/*
+ * Reconstruct a packet from the MP fragment queue.
+ * We go through increasing sequence numbers until we find a
+ * complete packet, or we get to the sequence number for a fragment
+ * which hasn't arrived but might still do so.
+ */
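+/* Example: a packet split into three fragments arrives with BEbits
+ * B, 0, E on consecutive sequence numbers; the walk below links the
+ * middle and end fragments onto the head skb's frag_list and returns
+ * the head as one reassembled frame.
+ */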
+static struct sk_buff *
+ppp_mp_reconstruct(struct ppp *ppp)
+{
+	u32 seq = ppp->nextseq;
+	u32 minseq = ppp->minseq;
+	struct sk_buff_head *list = &ppp->mrq;
+	struct sk_buff *p, *tmp;
+	struct sk_buff *head, *tail;
+	struct sk_buff *skb = NULL;
+	int lost = 0, len = 0;
+
+	if (ppp->mrru == 0)	/* do nothing until mrru is set */
+		return NULL;
+	head = list->next;
+	tail = NULL;
+	skb_queue_walk_safe(list, p, tmp) {
+	again:
+		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
+			/* this can't happen; ignore the skb anyway */
+			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+				   "seq %u < %u\n",
+				   PPP_MP_CB(p)->sequence, seq);
+			__skb_unlink(p, list);
+			kfree_skb(p);
+			continue;
+		}
+		if (PPP_MP_CB(p)->sequence != seq) {
+			u32 oldseq;
+			/* Fragment `seq' is missing.  If it is after
+			   minseq, it might arrive later, so stop here. */
+			if (seq_after(seq, minseq))
+				break;
+			/* Fragment `seq' is lost, keep going. */
+			lost = 1;
+			oldseq = seq;
+			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
+				minseq + 1 : PPP_MP_CB(p)->sequence;
+
+			if (ppp->debug & 1)
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "lost frag %u..%u\n",
+					      oldseq, seq-1);
+
+			goto again;
+		}
+
+		/*
+		 * At this point we know that all the fragments from
+		 * ppp->nextseq to seq are either present or lost.
+		 * Also, there are no complete packets in the queue
+		 * that have no missing fragments and end before this
+		 * fragment.
+		 */
+
+		/* B bit set indicates this fragment starts a packet */
+		if (PPP_MP_CB(p)->BEbits & B) {
+			head = p;
+			lost = 0;
+			len = 0;
+		}
+
+		len += p->len;
+
+		/* Got a complete packet yet? */
+		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
+		    (PPP_MP_CB(head)->BEbits & B)) {
+			if (len > ppp->mrru + 2) {
+				++ppp->dev->stats.rx_length_errors;
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "PPP: reconstructed packet"
+					      " is too long (%d)\n", len);
+			} else {
+				tail = p;
+				break;
+			}
+			ppp->nextseq = seq + 1;
+		}
+
+		/*
+		 * If this is the ending fragment of a packet,
+		 * and we haven't found a complete valid packet yet,
+		 * we can discard up to and including this fragment.
+		 */
+		if (PPP_MP_CB(p)->BEbits & E) {
+			struct sk_buff *tmp2;
+
+			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+				if (ppp->debug & 1)
+					netdev_printk(KERN_DEBUG, ppp->dev,
+						      "discarding frag %u\n",
+						      PPP_MP_CB(p)->sequence);
+				__skb_unlink(p, list);
+				kfree_skb(p);
+			}
+			head = skb_peek(list);
+			if (!head)
+				break;
+		}
+		++seq;
+	}
+
+	/* If we have a complete packet, copy it all into one skb. */
+	if (tail != NULL) {
+		/* If we have discarded any fragments,
+		   signal a receive error. */
+		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
+			skb_queue_walk_safe(list, p, tmp) {
+				if (p == head)
+					break;
+				if (ppp->debug & 1)
+					netdev_printk(KERN_DEBUG, ppp->dev,
+						      "discarding frag %u\n",
+						      PPP_MP_CB(p)->sequence);
+				__skb_unlink(p, list);
+				kfree_skb(p);
+			}
+
+			if (ppp->debug & 1)
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "  missed pkts %u..%u\n",
+					      ppp->nextseq,
+					      PPP_MP_CB(head)->sequence-1);
+			++ppp->dev->stats.rx_dropped;
+			ppp_receive_error(ppp);
+		}
+
+		skb = head;
+		if (head != tail) {
+			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+			p = skb_queue_next(list, head);
+			__skb_unlink(skb, list);
+			skb_queue_walk_from_safe(list, p, tmp) {
+				__skb_unlink(p, list);
+				*fragpp = p;
+				p->next = NULL;
+				fragpp = &p->next;
+
+				skb->len += p->len;
+				skb->data_len += p->len;
+				skb->truesize += p->truesize;
+
+				if (p == tail)
+					break;
+			}
+		} else {
+			__skb_unlink(skb, list);
+		}
+
+		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
+	}
+
+	return skb;
+}
+#endif /* CONFIG_PPP_MULTILINK */
+
+/*
+ * Channel interface.
+ */
+
+/* Create a new, unattached ppp channel. */
+int ppp_register_channel(struct ppp_channel *chan)
+{
+	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
+}
+
+/* Create a new, unattached ppp channel for specified net. */
+int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
+{
+	struct channel *pch;
+	struct ppp_net *pn;
+
+	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+	if (!pch)
+		return -ENOMEM;
+
+	pn = ppp_pernet(net);
+
+	pch->ppp = NULL;
+	pch->chan = chan;
+	pch->chan_net = get_net(net);
+	chan->ppp = pch;
+	init_ppp_file(&pch->file, CHANNEL);
+	pch->file.hdrlen = chan->hdrlen;
+#ifdef CONFIG_PPP_MULTILINK
+	pch->lastseq = -1;
+#endif /* CONFIG_PPP_MULTILINK */
+	init_rwsem(&pch->chan_sem);
+	spin_lock_init(&pch->downl);
+	rwlock_init(&pch->upl);
+
+	spin_lock_bh(&pn->all_channels_lock);
+	pch->file.index = ++pn->last_channel_index;
+	list_add(&pch->list, &pn->new_channels);
+	atomic_inc(&channel_count);
+	spin_unlock_bh(&pn->all_channels_lock);
+
+	return 0;
+}
+
+/*
+ * Return the index of a channel.
+ */
+int ppp_channel_index(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+
+	if (pch)
+		return pch->file.index;
+	return -1;
+}
+
+/*
+ * Return the PPP unit number to which a channel is connected.
+ */
+int ppp_unit_number(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+	int unit = -1;
+
+	if (pch) {
+		read_lock_bh(&pch->upl);
+		if (pch->ppp)
+			unit = pch->ppp->file.index;
+		read_unlock_bh(&pch->upl);
+	}
+	return unit;
+}
+
+/*
+ * Return the PPP device interface name of a channel.
+ */
+char *ppp_dev_name(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+	char *name = NULL;
+
+	if (pch) {
+		read_lock_bh(&pch->upl);
+		if (pch->ppp && pch->ppp->dev)
+			name = pch->ppp->dev->name;
+		read_unlock_bh(&pch->upl);
+	}
+	return name;
+}
+
+/*
+ * Disconnect a channel from the generic layer.
+ * This must be called in process context.
+ */
+void
+ppp_unregister_channel(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+	struct ppp_net *pn;
+
+	if (!pch)
+		return;		/* should never happen */
+
+	chan->ppp = NULL;
+
+	/*
+	 * This ensures that we have returned from any calls into the
+	 * channel's start_xmit or ioctl routine before we proceed.
+	 */
+	down_write(&pch->chan_sem);
+	spin_lock_bh(&pch->downl);
+	pch->chan = NULL;
+	spin_unlock_bh(&pch->downl);
+	up_write(&pch->chan_sem);
+	ppp_disconnect_channel(pch);
+
+	pn = ppp_pernet(pch->chan_net);
+	spin_lock_bh(&pn->all_channels_lock);
+	list_del(&pch->list);
+	spin_unlock_bh(&pn->all_channels_lock);
+
+	pch->file.dead = 1;
+	wake_up_interruptible(&pch->file.rwait);
+	if (refcount_dec_and_test(&pch->file.refcnt))
+		ppp_destroy_channel(pch);
+}
+
+/*
+ * Callback from a channel when it can accept more to transmit.
+ * This should be called at BH/softirq level, not interrupt level.
+ */
+void
+ppp_output_wakeup(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+
+	if (!pch)
+		return;
+	ppp_channel_push(pch);
+}
+
+/*
+ * Compression control.
+ */
+
+/* Process the PPPIOCSCOMPRESS ioctl. */
+static int
+ppp_set_compress(struct ppp *ppp, unsigned long arg)
+{
+	int err;
+	struct compressor *cp, *ocomp;
+	struct ppp_option_data data;
+	void *state, *ostate;
+	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
+
+	err = -EFAULT;
+	if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
+		goto out;
+	if (data.length > CCP_MAX_OPTION_LENGTH)
+		goto out;
+	if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length))
+		goto out;
+
+	err = -EINVAL;
+	if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length)
+		goto out;
+
+	cp = try_then_request_module(
+		find_compressor(ccp_option[0]),
+		"ppp-compress-%d", ccp_option[0]);
+	if (!cp)
+		goto out;
+
+	err = -ENOBUFS;
+	if (data.transmit) {
+		state = cp->comp_alloc(ccp_option, data.length);
+		if (state) {
+			ppp_xmit_lock(ppp);
+			ppp->xstate &= ~SC_COMP_RUN;
+			ocomp = ppp->xcomp;
+			ostate = ppp->xc_state;
+			ppp->xcomp = cp;
+			ppp->xc_state = state;
+			ppp_xmit_unlock(ppp);
+			if (ostate) {
+				ocomp->comp_free(ostate);
+				module_put(ocomp->owner);
+			}
+			err = 0;
+		} else
+			module_put(cp->owner);
+
+	} else {
+		state = cp->decomp_alloc(ccp_option, data.length);
+		if (state) {
+			ppp_recv_lock(ppp);
+			ppp->rstate &= ~SC_DECOMP_RUN;
+			ocomp = ppp->rcomp;
+			ostate = ppp->rc_state;
+			ppp->rcomp = cp;
+			ppp->rc_state = state;
+			ppp_recv_unlock(ppp);
+			if (ostate) {
+				ocomp->decomp_free(ostate);
+				module_put(ocomp->owner);
+			}
+			err = 0;
+		} else
+			module_put(cp->owner);
+	}
+
+ out:
+	return err;
+}
+
+/*
+ * Look at a CCP packet and update our state accordingly.
+ * We assume the caller has the xmit or recv path locked.
+ */
+static void
+ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
+{
+	unsigned char *dp;
+	int len;
+
+	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
+		return;	/* no header */
+	dp = skb->data + 2;
+
+	switch (CCP_CODE(dp)) {
+	case CCP_CONFREQ:
+
+		/* A ConfReq starts negotiation of compression
+		 * in one direction of transmission,
+		 * and hence brings it down...but which way?
+		 *
+		 * Remember:
+		 * A ConfReq indicates what the sender would like to receive
+		 */
+		if (inbound)
+			/* He is proposing what I should send */
+			ppp->xstate &= ~SC_COMP_RUN;
+		else
+			/* I am proposing what he should send */
+			ppp->rstate &= ~SC_DECOMP_RUN;
+
+		break;
+
+	case CCP_TERMREQ:
+	case CCP_TERMACK:
+		/*
+		 * CCP is going down, both directions of transmission
+		 */
+		ppp->rstate &= ~SC_DECOMP_RUN;
+		ppp->xstate &= ~SC_COMP_RUN;
+		break;
+
+	case CCP_CONFACK:
+		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
+			break;
+		len = CCP_LENGTH(dp);
+		if (!pskb_may_pull(skb, len + 2))
+			return;		/* too short */
+		dp += CCP_HDRLEN;
+		len -= CCP_HDRLEN;
+		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
+			break;
+		if (inbound) {
+			/* we will start receiving compressed packets */
+			if (!ppp->rc_state)
+				break;
+			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
+					ppp->file.index, 0, ppp->mru, ppp->debug)) {
+				ppp->rstate |= SC_DECOMP_RUN;
+				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
+			}
+		} else {
+			/* we will soon start sending compressed packets */
+			if (!ppp->xc_state)
+				break;
+			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
+					ppp->file.index, 0, ppp->debug))
+				ppp->xstate |= SC_COMP_RUN;
+		}
+		break;
+
+	case CCP_RESETACK:
+		/* reset the [de]compressor */
+		if ((ppp->flags & SC_CCP_UP) == 0)
+			break;
+		if (inbound) {
+			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
+				ppp->rcomp->decomp_reset(ppp->rc_state);
+				ppp->rstate &= ~SC_DC_ERROR;
+			}
+		} else {
+			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
+				ppp->xcomp->comp_reset(ppp->xc_state);
+		}
+		break;
+	}
+}
+
+/* Free up compression resources. */
+static void
+ppp_ccp_closed(struct ppp *ppp)
+{
+	void *xstate, *rstate;
+	struct compressor *xcomp, *rcomp;
+
+	ppp_lock(ppp);
+	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
+	ppp->xstate = 0;
+	xcomp = ppp->xcomp;
+	xstate = ppp->xc_state;
+	ppp->xc_state = NULL;
+	ppp->rstate = 0;
+	rcomp = ppp->rcomp;
+	rstate = ppp->rc_state;
+	ppp->rc_state = NULL;
+	ppp_unlock(ppp);
+
+	if (xstate) {
+		xcomp->comp_free(xstate);
+		module_put(xcomp->owner);
+	}
+	if (rstate) {
+		rcomp->decomp_free(rstate);
+		module_put(rcomp->owner);
+	}
+}
+
+/* List of compressors. */
+static LIST_HEAD(compressor_list);
+static DEFINE_SPINLOCK(compressor_list_lock);
+
+struct compressor_entry {
+	struct list_head list;
+	struct compressor *comp;
+};
+
+static struct compressor_entry *
+find_comp_entry(int proto)
+{
+	struct compressor_entry *ce;
+
+	list_for_each_entry(ce, &compressor_list, list) {
+		if (ce->comp->compress_proto == proto)
+			return ce;
+	}
+	return NULL;
+}
+
+/* Register a compressor */
+int
+ppp_register_compressor(struct compressor *cp)
+{
+	struct compressor_entry *ce;
+	int ret;
+
+	spin_lock(&compressor_list_lock);
+	ret = -EEXIST;
+	if (find_comp_entry(cp->compress_proto))
+		goto out;
+	ret = -ENOMEM;
+	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
+	if (!ce)
+		goto out;
+	ret = 0;
+	ce->comp = cp;
+	list_add(&ce->list, &compressor_list);
+ out:
+	spin_unlock(&compressor_list_lock);
+	return ret;
+}
+
+/* Unregister a compressor */
+void
+ppp_unregister_compressor(struct compressor *cp)
+{
+	struct compressor_entry *ce;
+
+	spin_lock(&compressor_list_lock);
+	ce = find_comp_entry(cp->compress_proto);
+	if (ce && ce->comp == cp) {
+		list_del(&ce->list);
+		kfree(ce);
+	}
+	spin_unlock(&compressor_list_lock);
+}
+
+/* Find a compressor. */
+static struct compressor *
+find_compressor(int type)
+{
+	struct compressor_entry *ce;
+	struct compressor *cp = NULL;
+
+	spin_lock(&compressor_list_lock);
+	ce = find_comp_entry(type);
+	if (ce) {
+		cp = ce->comp;
+		if (!try_module_get(cp->owner))
+			cp = NULL;
+	}
+	spin_unlock(&compressor_list_lock);
+	return cp;
+}
+
+/*
+ * Miscellaneous stuff.
+ */
+
+static void
+ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
+{
+	struct slcompress *vj = ppp->vj;
+
+	memset(st, 0, sizeof(*st));
+	st->p.ppp_ipackets = ppp->stats64.rx_packets;
+	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
+	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
+	st->p.ppp_opackets = ppp->stats64.tx_packets;
+	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
+	st->p.ppp_obytes = ppp->stats64.tx_bytes;
+	if (!vj)
+		return;
+	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
+	st->vj.vjs_compressed = vj->sls_o_compressed;
+	st->vj.vjs_searches = vj->sls_o_searches;
+	st->vj.vjs_misses = vj->sls_o_misses;
+	st->vj.vjs_errorin = vj->sls_i_error;
+	st->vj.vjs_tossed = vj->sls_i_tossed;
+	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
+	st->vj.vjs_compressedin = vj->sls_i_compressed;
+}
+
+/*
+ * Stuff for handling the lists of ppp units and channels
+ * and for initialization.
+ */
+
+/*
+ * Create a new ppp interface unit.  Fails if it can't allocate memory
+ * or if there is already a unit with the requested number.
+ * unit == -1 means allocate a new number.
+ */
+static int ppp_create_interface(struct net *net, struct file *file, int *unit)
+{
+	struct ppp_config conf = {
+		.file = file,
+		.unit = *unit,
+		.ifname_is_set = false,
+	};
+	struct net_device *dev;
+	struct ppp *ppp;
+	int err;
+
+	dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err;
+	}
+	dev_net_set(dev, net);
+	dev->rtnl_link_ops = &ppp_link_ops;
+
+	rtnl_lock();
+
+	err = ppp_dev_configure(net, dev, &conf);
+	if (err < 0)
+		goto err_dev;
+	ppp = netdev_priv(dev);
+	*unit = ppp->file.index;
+
+	rtnl_unlock();
+
+	return 0;
+
+err_dev:
+	rtnl_unlock();
+	free_netdev(dev);
+err:
+	return err;
+}
+
+/*
+ * Initialize a ppp_file structure.
+ */
+static void
+init_ppp_file(struct ppp_file *pf, int kind)
+{
+	pf->kind = kind;
+	skb_queue_head_init(&pf->xq);
+	skb_queue_head_init(&pf->rq);
+	refcount_set(&pf->refcnt, 1);
+	init_waitqueue_head(&pf->rwait);
+}
+
+/*
+ * Free the memory used by a ppp unit.  This is only called once
+ * there are no channels connected to the unit and no file structs
+ * that reference the unit.
+ */
+static void ppp_destroy_interface(struct ppp *ppp)
+{
+	atomic_dec(&ppp_unit_count);
+
+	if (!ppp->file.dead || ppp->n_channels) {
+		/* "can't happen" */
+		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+			   "but dead=%d n_channels=%d !\n",
+			   ppp, ppp->file.dead, ppp->n_channels);
+		return;
+	}
+
+	ppp_ccp_closed(ppp);
+	if (ppp->vj) {
+		slhc_free(ppp->vj);
+		ppp->vj = NULL;
+	}
+	skb_queue_purge(&ppp->file.xq);
+	skb_queue_purge(&ppp->file.rq);
+#ifdef CONFIG_PPP_MULTILINK
+	skb_queue_purge(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+	if (ppp->pass_filter) {
+		bpf_prog_destroy(ppp->pass_filter);
+		ppp->pass_filter = NULL;
+	}
+
+	if (ppp->active_filter) {
+		bpf_prog_destroy(ppp->active_filter);
+		ppp->active_filter = NULL;
+	}
+#endif /* CONFIG_PPP_FILTER */
+
+	kfree_skb(ppp->xmit_pending);
+	free_percpu(ppp->xmit_recursion);
+
+	free_netdev(ppp->dev);
+}
+
+/*
+ * Locate an existing ppp unit.
+ * The caller should have locked the all_ppp_mutex.
+ */
+static struct ppp *
+ppp_find_unit(struct ppp_net *pn, int unit)
+{
+	return unit_find(&pn->units_idr, unit);
+}
+
+/*
+ * Locate an existing ppp channel.
+ * The caller should have locked the all_channels_lock.
+ * First we look in the new_channels list, then in the
+ * all_channels list.  If found in the new_channels list,
+ * we move it to the all_channels list.  This is for speed
+ * when we have a lot of channels in use.
+ */
+static struct channel *
+ppp_find_channel(struct ppp_net *pn, int unit)
+{
+	struct channel *pch;
+
+	list_for_each_entry(pch, &pn->new_channels, list) {
+		if (pch->file.index == unit) {
+			list_move(&pch->list, &pn->all_channels);
+			return pch;
+		}
+	}
+
+	list_for_each_entry(pch, &pn->all_channels, list) {
+		if (pch->file.index == unit)
+			return pch;
+	}
+
+	return NULL;
+}
+
+/*
+ * Connect a PPP channel to a PPP interface unit.
+ */
+static int
+ppp_connect_channel(struct channel *pch, int unit)
+{
+	struct ppp *ppp;
+	struct ppp_net *pn;
+	int ret = -ENXIO;
+	int hdrlen;
+
+	pn = ppp_pernet(pch->chan_net);
+
+	mutex_lock(&pn->all_ppp_mutex);
+	ppp = ppp_find_unit(pn, unit);
+	if (!ppp)
+		goto out;
+	write_lock_bh(&pch->upl);
+	ret = -EINVAL;
+	if (pch->ppp)
+		goto outl;
+
+	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
+	if (pch->file.hdrlen > ppp->file.hdrlen)
+		ppp->file.hdrlen = pch->file.hdrlen;
+	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
+	if (hdrlen > ppp->dev->hard_header_len)
+		ppp->dev->hard_header_len = hdrlen;
+	list_add_tail(&pch->clist, &ppp->channels);
+	++ppp->n_channels;
+	pch->ppp = ppp;
+	refcount_inc(&ppp->file.refcnt);
+	ppp_unlock(ppp);
+	ret = 0;
+
+ outl:
+	write_unlock_bh(&pch->upl);
+ out:
+	mutex_unlock(&pn->all_ppp_mutex);
+	return ret;
+}
+
+/*
+ * Disconnect a channel from its ppp unit.
+ */
+static int
+ppp_disconnect_channel(struct channel *pch)
+{
+	struct ppp *ppp;
+	int err = -EINVAL;
+
+	write_lock_bh(&pch->upl);
+	ppp = pch->ppp;
+	pch->ppp = NULL;
+	write_unlock_bh(&pch->upl);
+	if (ppp) {
+		/* remove it from the ppp unit's list */
+		ppp_lock(ppp);
+		list_del(&pch->clist);
+		if (--ppp->n_channels == 0)
+			wake_up_interruptible(&ppp->file.rwait);
+		ppp_unlock(ppp);
+		if (refcount_dec_and_test(&ppp->file.refcnt))
+			ppp_destroy_interface(ppp);
+		err = 0;
+	}
+	return err;
+}
+
+/*
+ * Free up the resources used by a ppp channel.
+ */
+static void ppp_destroy_channel(struct channel *pch)
+{
+	put_net(pch->chan_net);
+	pch->chan_net = NULL;
+
+	atomic_dec(&channel_count);
+
+	if (!pch->file.dead) {
+		/* "can't happen" */
+		pr_err("ppp: destroying undead channel %p !\n", pch);
+		return;
+	}
+	skb_queue_purge(&pch->file.xq);
+	skb_queue_purge(&pch->file.rq);
+	kfree(pch);
+}
+
+static void __exit ppp_cleanup(void)
+{
+	/* should never happen */
+	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
+		pr_err("PPP: removing module but units remain!\n");
+	rtnl_link_unregister(&ppp_link_ops);
+	unregister_chrdev(PPP_MAJOR, "ppp");
+	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
+	class_destroy(ppp_class);
+	unregister_pernet_device(&ppp_net_ops);
+}
+
+/*
+ * Units handling. Caller must protect concurrent access
+ * by holding all_ppp_mutex
+ */
+
+/* associate pointer with specified number */
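+/* idr_alloc() with range [n, n + 1) reserves exactly id n; if that id is
+ * already taken it returns -ENOSPC, which we map to -EINVAL for callers.
+ */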
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+	int unit;
+
+	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
+	if (unit == -ENOSPC)
+		unit = -EINVAL;
+	return unit;
+}
+
+/* get new free unit number and associate pointer with it */
+static int unit_get(struct idr *p, void *ptr)
+{
+	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
+}
+
+/* put unit number back to a pool */
+static void unit_put(struct idr *p, int n)
+{
+	idr_remove(p, n);
+}
+
+/* get pointer associated with the number */
+static void *unit_find(struct idr *p, int n)
+{
+	return idr_find(p, n);
+}
+
+/* Module/initialization stuff */
+
+module_init(ppp_init);
+module_exit(ppp_cleanup);
+
+EXPORT_SYMBOL(ppp_register_net_channel);
+EXPORT_SYMBOL(ppp_register_channel);
+EXPORT_SYMBOL(ppp_unregister_channel);
+EXPORT_SYMBOL(ppp_channel_index);
+EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_dev_name);
+EXPORT_SYMBOL(ppp_input);
+EXPORT_SYMBOL(ppp_input_error);
+EXPORT_SYMBOL(ppp_output_wakeup);
+EXPORT_SYMBOL(ppp_register_compressor);
+EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS_RTNL_LINK("ppp");
+MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
new file mode 100644
index 0000000..a205750
--- /dev/null
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -0,0 +1,757 @@
+/*
+ * ppp_mppe.c - interface MPPE to the PPP code.
+ * This version is for use with Linux kernel 2.6.14+
+ *
+ * By Frank Cusack <fcusack@fcusack.com>.
+ * Copyright (c) 2002,2003,2004 Google, Inc.
+ * All rights reserved.
+ *
+ * License:
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies.  This software is provided without any
+ * warranty, express or implied.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Changelog:
+ *      08/12/05 - Matt Domsch <Matt_Domsch@dell.com>
+ *                 Only need extra skb padding on transmit, not receive.
+ *      06/18/04 - Matt Domsch <Matt_Domsch@dell.com>, Oleg Makarenko <mole@quadra.ru>
+ *                 Use Linux kernel 2.6 arc4 and sha1 routines rather than
+ *                 providing our own.
+ *      2/15/04 - TS: added #include <version.h> and testing for Kernel
+ *                    version before using
+ *                    MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are
+ *                    deprecated in 2.6
+ */
+
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/ppp_defs.h>
+#include <linux/ppp-comp.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+
+#include "ppp_mppe.h"
+
+MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
+MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
+MODULE_VERSION("1.0.2");
+
+static unsigned int
+setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
+{
+	sg_set_buf(sg, address, length);
+	return length;
+}
+
+#define SHA1_PAD_SIZE 40
+
+/*
+ * kernel crypto API needs its arguments to be in kmalloc'd memory, not in the module
+ * static data area.  That means sha_pad needs to be kmalloc'd.
+ */
+
+struct sha_pad {
+	unsigned char sha_pad1[SHA1_PAD_SIZE];
+	unsigned char sha_pad2[SHA1_PAD_SIZE];
+};
+static struct sha_pad *sha_pad;
+
+static inline void sha_pad_init(struct sha_pad *shapad)
+{
+	memset(shapad->sha_pad1, 0x00, sizeof(shapad->sha_pad1));
+	memset(shapad->sha_pad2, 0xF2, sizeof(shapad->sha_pad2));
+}
+
+/*
+ * State for an MPPE (de)compressor.
+ */
+struct ppp_mppe_state {
+	struct crypto_skcipher *arc4;
+	struct shash_desc *sha1;
+	unsigned char *sha1_digest;
+	unsigned char master_key[MPPE_MAX_KEY_LEN];
+	unsigned char session_key[MPPE_MAX_KEY_LEN];
+	unsigned keylen;	/* key length in bytes             */
+	/* NB: 128-bit == 16, 40-bit == 8! */
+	/* If we want to support 56-bit,   */
+	/* the unit has to change to bits  */
+	unsigned char bits;	/* MPPE control bits */
+	unsigned ccount;	/* 12-bit coherency count (seqno)  */
+	unsigned stateful;	/* stateful mode flag */
+	int discard;		/* stateful mode packet loss flag */
+	int sanity_errors;	/* take down LCP if too many */
+	int unit;
+	int debug;
+	struct compstat stats;
+};
+
+/* struct ppp_mppe_state.bits definitions */
+#define MPPE_BIT_A	0x80	/* Encryption tables were (re)initialized */
+#define MPPE_BIT_B	0x40	/* MPPC only (not implemented) */
+#define MPPE_BIT_C	0x20	/* MPPC only (not implemented) */
+#define MPPE_BIT_D	0x10	/* This is an encrypted frame */
+
+#define MPPE_BIT_FLUSHED	MPPE_BIT_A
+#define MPPE_BIT_ENCRYPTED	MPPE_BIT_D
+
+#define MPPE_BITS(p) ((p)[4] & 0xf0)
+#define MPPE_CCOUNT(p) ((((p)[4] & 0x0f) << 8) + (p)[5])
+#define MPPE_CCOUNT_SPACE 0x1000	/* The size of the ccount space */
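+/* Example: p[4] = 0x9a, p[5] = 0xbc gives MPPE_BITS(p) == 0x90 (FLUSHED
+ * and ENCRYPTED set) and MPPE_CCOUNT(p) == 0xabc.
+ */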
+
+#define MPPE_OVHD	2	/* MPPE overhead/packet */
+#define SANITY_MAX	1600	/* Max bogon factor we will tolerate */
+
+/*
+ * Key Derivation, from RFC 3078, RFC 3079.
+ * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
+ */
+static void get_new_key_from_sha(struct ppp_mppe_state * state)
+{
+	crypto_shash_init(state->sha1);
+	crypto_shash_update(state->sha1, state->master_key,
+			    state->keylen);
+	crypto_shash_update(state->sha1, sha_pad->sha_pad1,
+			    sizeof(sha_pad->sha_pad1));
+	crypto_shash_update(state->sha1, state->session_key,
+			    state->keylen);
+	crypto_shash_update(state->sha1, sha_pad->sha_pad2,
+			    sizeof(sha_pad->sha_pad2));
+	crypto_shash_final(state->sha1, state->sha1_digest);
+}
+
+/*
+ * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3.
+ * Well, not what's written there, but rather what they meant.
+ */
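+/* On every rekey the interim key is SHA1(master_key | pad1 | session_key |
+ * pad2) truncated to keylen; for the initial key this digest is used
+ * directly, otherwise the new session key is the digest encrypted with
+ * RC4 keyed by that same digest.
+ */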
+static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
+{
+	struct scatterlist sg_in[1], sg_out[1];
+	SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+
+	skcipher_request_set_tfm(req, state->arc4);
+	skcipher_request_set_callback(req, 0, NULL, NULL);
+
+	get_new_key_from_sha(state);
+	if (!initial_key) {
+		crypto_skcipher_setkey(state->arc4, state->sha1_digest,
+				       state->keylen);
+		sg_init_table(sg_in, 1);
+		sg_init_table(sg_out, 1);
+		setup_sg(sg_in, state->sha1_digest, state->keylen);
+		setup_sg(sg_out, state->session_key, state->keylen);
+		skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
+					   NULL);
+		if (crypto_skcipher_encrypt(req))
+			printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
+	} else {
+		memcpy(state->session_key, state->sha1_digest, state->keylen);
+	}
+	if (state->keylen == 8) {
+		/* See RFC 3078 */
+		state->session_key[0] = 0xd1;
+		state->session_key[1] = 0x26;
+		state->session_key[2] = 0x9e;
+	}
+	crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+	skcipher_request_zero(req);
+}
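+
+/*
+ * Summary of the flow above (RFC 3078 sec. 7.3 as implemented here):
+ *	InterimKey = get_new_key_from_sha()
+ *	SessionKey = rc4(InterimKey, InterimKey)   (skipped for the initial key)
+ *	40-bit keys are then salted: SessionKey[0..2] = d1 26 9e
+ * and finally the arc4 transform is keyed with the new SessionKey.
+ */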
+
+/*
+ * Allocate space for a (de)compressor.
+ */
+static void *mppe_alloc(unsigned char *options, int optlen)
+{
+	struct ppp_mppe_state *state;
+	struct crypto_shash *shash;
+	unsigned int digestsize;
+
+	if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
+	    options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+		goto out;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		goto out;
+
+	state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(state->arc4)) {
+		state->arc4 = NULL;
+		goto out_free;
+	}
+
+	shash = crypto_alloc_shash("sha1", 0, 0);
+	if (IS_ERR(shash))
+		goto out_free;
+
+	state->sha1 = kmalloc(sizeof(*state->sha1) +
+				     crypto_shash_descsize(shash),
+			      GFP_KERNEL);
+	if (!state->sha1) {
+		crypto_free_shash(shash);
+		goto out_free;
+	}
+	state->sha1->tfm = shash;
+	state->sha1->flags = 0;
+
+	digestsize = crypto_shash_digestsize(shash);
+	if (digestsize < MPPE_MAX_KEY_LEN)
+		goto out_free;
+
+	state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
+	if (!state->sha1_digest)
+		goto out_free;
+
+	/* Save keys. */
+	memcpy(state->master_key, &options[CILEN_MPPE],
+	       sizeof(state->master_key));
+	memcpy(state->session_key, state->master_key,
+	       sizeof(state->master_key));
+
+	/*
+	 * We defer initial key generation until mppe_init(), as mppe_alloc()
+	 * is called frequently during negotiation.
+	 */
+
+	return (void *)state;
+
+out_free:
+	kfree(state->sha1_digest);
+	if (state->sha1) {
+		crypto_free_shash(state->sha1->tfm);
+		kzfree(state->sha1);
+	}
+	crypto_free_skcipher(state->arc4);
+	kfree(state);
+out:
+	return NULL;
+}
+
+/*
+ * Deallocate space for a (de)compressor.
+ */
+static void mppe_free(void *arg)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	if (state) {
+		kfree(state->sha1_digest);
+		crypto_free_shash(state->sha1->tfm);
+		kzfree(state->sha1);
+		crypto_free_skcipher(state->arc4);
+		kfree(state);
+	}
+}
+
+/*
+ * Initialize (de)compressor state.
+ */
+static int
+mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
+	  const char *debugstr)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	unsigned char mppe_opts;
+
+	if (optlen != CILEN_MPPE ||
+	    options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+		return 0;
+
+	MPPE_CI_TO_OPTS(&options[2], mppe_opts);
+	if (mppe_opts & MPPE_OPT_128)
+		state->keylen = 16;
+	else if (mppe_opts & MPPE_OPT_40)
+		state->keylen = 8;
+	else {
+		printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr,
+		       unit);
+		return 0;
+	}
+	if (mppe_opts & MPPE_OPT_STATEFUL)
+		state->stateful = 1;
+
+	/* Generate the initial session key. */
+	mppe_rekey(state, 1);
+
+	if (debug) {
+		printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
+		       debugstr, unit, (state->keylen == 16) ? 128 : 40,
+		       (state->stateful) ? "stateful" : "stateless");
+		printk(KERN_DEBUG
+		       "%s[%d]: keys: master: %*phN initial session: %*phN\n",
+		       debugstr, unit,
+		       (int)sizeof(state->master_key), state->master_key,
+		       (int)sizeof(state->session_key), state->session_key);
+	}
+
+	/*
+	 * Initialize the coherency count.  The initial value is not specified
+	 * in RFC 3078, but we can make a reasonable assumption that it will
+	 * start at 0.  Setting it to the max here makes the comp/decomp code
+	 * do the right thing (determined through experiment).
+	 */
+	state->ccount = MPPE_CCOUNT_SPACE - 1;
+
+	/*
+	 * Note that even though we have initialized the key table, we don't
+	 * set the FLUSHED bit.  This is contrary to RFC 3078, sec. 3.1.
+	 */
+	state->bits = MPPE_BIT_ENCRYPTED;
+
+	state->unit = unit;
+	state->debug = debug;
+
+	return 1;
+}
+
+static int
+mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit,
+	       int hdrlen, int debug)
+{
+	/* ARGSUSED */
+	return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init");
+}
+
+/*
+ * We received a CCP Reset-Request (actually, we are sending a Reset-Ack),
+ * tell the compressor to rekey.  Note that we MUST NOT rekey for
+ * every CCP Reset-Request; we only rekey on the next xmit packet.
+ * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost.
+ * So, rekeying for every CCP Reset-Request is broken as the peer will not
+ * know how many times we've rekeyed.  (If we rekey and THEN get another
+ * CCP Reset-Request, we must rekey again.)
+ */
+static void mppe_comp_reset(void *arg)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+
+	state->bits |= MPPE_BIT_FLUSHED;
+}
+
+/*
+ * Compress (encrypt) a packet.
+ * It's strange to call this a compressor, since the output is always
+ * MPPE_OVHD + 2 bytes larger than the input.
+ */
+static int
+mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
+	      int isize, int osize)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+	int proto;
+	int err;
+	struct scatterlist sg_in[1], sg_out[1];
+
+	/*
+	 * Check that the protocol is in the range we handle; MPPE only
+	 * encrypts packets with protocol values 0x0021 to 0x00fa
+	 * (RFC 3078).
+	 */
+	proto = PPP_PROTOCOL(ibuf);
+	if (proto < 0x0021 || proto > 0x00fa)
+		return 0;
+
+	/* Make sure we have enough room to generate an encrypted packet. */
+	if (osize < isize + MPPE_OVHD + 2) {
+		/* Drop the packet if we should encrypt it, but can't. */
+		printk(KERN_DEBUG "mppe_compress[%d]: osize too small! "
+		       "(have: %d need: %d)\n", state->unit,
+		       osize, isize + MPPE_OVHD + 2);
+		return -1;
+	}
+
+	osize = isize + MPPE_OVHD + 2;
+
+	/*
+	 * Copy over the PPP header and set control bits.
+	 */
+	obuf[0] = PPP_ADDRESS(ibuf);
+	obuf[1] = PPP_CONTROL(ibuf);
+	put_unaligned_be16(PPP_COMP, obuf + 2);
+	obuf += PPP_HDRLEN;
+
+	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
+	if (state->debug >= 7)
+		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
+		       state->ccount);
+	put_unaligned_be16(state->ccount, obuf);
+
+	if (!state->stateful ||	/* stateless mode     */
+	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
+	    (state->bits & MPPE_BIT_FLUSHED)) {	/* CCP Reset-Request  */
+		/* We must rekey */
+		if (state->debug && state->stateful)
+			printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n",
+			       state->unit);
+		mppe_rekey(state, 0);
+		state->bits |= MPPE_BIT_FLUSHED;
+	}
+	obuf[0] |= state->bits;
+	state->bits &= ~MPPE_BIT_FLUSHED;	/* reset for next xmit */
+
+	obuf += MPPE_OVHD;
+	ibuf += 2;		/* skip to proto field */
+	isize -= 2;
+
+	/* Encrypt packet */
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 1);
+	setup_sg(sg_in, ibuf, isize);
+	setup_sg(sg_out, obuf, osize);
+
+	skcipher_request_set_tfm(req, state->arc4);
+	skcipher_request_set_callback(req, 0, NULL, NULL);
+	skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
+	err = crypto_skcipher_encrypt(req);
+	skcipher_request_zero(req);
+	if (err) {
+		printk(KERN_DEBUG "crypto_skcipher_encrypt failed\n");
+		return -1;
+	}
+
+	state->stats.unc_bytes += isize;
+	state->stats.unc_packets++;
+	state->stats.comp_bytes += osize;
+	state->stats.comp_packets++;
+
+	return osize;
+}
+
+/*
+ * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going
+ * to look bad ... and the longer the link is up the worse it will get.
+ */
+static void mppe_comp_stats(void *arg, struct compstat *stats)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+
+	*stats = state->stats;
+}
+
+static int
+mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit,
+		 int hdrlen, int mru, int debug)
+{
+	/* ARGSUSED */
+	return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init");
+}
+
+/*
+ * We received a CCP Reset-Ack.  Just ignore it.
+ */
+static void mppe_decomp_reset(void *arg)
+{
+	/* ARGSUSED */
+	return;
+}
+
+/*
+ * Decompress (decrypt) an MPPE packet.
+ */
+static int
+mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
+		int osize)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+	unsigned ccount;
+	int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
+	struct scatterlist sg_in[1], sg_out[1];
+
+	if (isize <= PPP_HDRLEN + MPPE_OVHD) {
+		if (state->debug)
+			printk(KERN_DEBUG
+			       "mppe_decompress[%d]: short pkt (%d)\n",
+			       state->unit, isize);
+		return DECOMP_ERROR;
+	}
+
+	/*
+	 * Make sure we have enough room to decrypt the packet.
+	 * Note that for our test we only subtract 1 byte whereas in
+	 * mppe_compress() we added 2 bytes (+MPPE_OVHD);
+	 * this is to account for possible PFC.
+	 */
+	if (osize < isize - MPPE_OVHD - 1) {
+		printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
+		       "(have: %d need: %d)\n", state->unit,
+		       osize, isize - MPPE_OVHD - 1);
+		return DECOMP_ERROR;
+	}
+	osize = isize - MPPE_OVHD - 2;	/* assume no PFC */
+
+	ccount = MPPE_CCOUNT(ibuf);
+	if (state->debug >= 7)
+		printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n",
+		       state->unit, ccount);
+
+	/* sanity checks -- terminate with extreme prejudice */
+	if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) {
+		printk(KERN_DEBUG
+		       "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
+		       state->unit);
+		state->sanity_errors += 100;
+		goto sanity_error;
+	}
+	if (!state->stateful && !flushed) {
+		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
+		       "stateless mode!\n", state->unit);
+		state->sanity_errors += 100;
+		goto sanity_error;
+	}
+	if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
+		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
+		       "flag packet!\n", state->unit);
+		state->sanity_errors += 100;
+		goto sanity_error;
+	}
+
+	/*
+	 * Check the coherency count.
+	 */
+
+	if (!state->stateful) {
+		/* Discard late packet */
+		if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
+						> MPPE_CCOUNT_SPACE / 2) {
+			state->sanity_errors++;
+			goto sanity_error;
+		}
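+
+		/*
+		 * Example (illustrative): with state->ccount == 4000, an
+		 * incoming ccount of 100 has forward distance
+		 * (100 - 4000) mod 4096 = 196 <= 2048, so it is accepted
+		 * (and we rekey 196 times below to catch up); an incoming
+		 * ccount of 3000 has distance 3096 > 2048 and is dropped
+		 * as late.
+		 */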
+
+		/* RFC 3078, sec 8.1.  Rekey for every packet. */
+		while (state->ccount != ccount) {
+			mppe_rekey(state, 0);
+			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
+		}
+	} else {
+		/* RFC 3078, sec 8.2. */
+		if (!state->discard) {
+			/* normal state */
+			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
+			if (ccount != state->ccount) {
+				/*
+				 * (ccount > state->ccount)
+				 * Packet loss detected, enter the discard state.
+				 * Signal the peer to rekey (by sending a CCP Reset-Request).
+				 */
+				state->discard = 1;
+				return DECOMP_ERROR;
+			}
+		} else {
+			/* discard state */
+			if (!flushed) {
+				/* ccp.c will be silent (no additional CCP Reset-Requests). */
+				return DECOMP_ERROR;
+			} else {
+				/* Rekey for every missed "flag" packet. */
+				while ((ccount & ~0xff) !=
+				       (state->ccount & ~0xff)) {
+					mppe_rekey(state, 0);
+					state->ccount =
+					    (state->ccount +
+					     256) % MPPE_CCOUNT_SPACE;
+				}
+
+				/* reset */
+				state->discard = 0;
+				state->ccount = ccount;
+				/*
+				 * Another problem with RFC 3078 here.  It implies that the
+				 * peer need not send a Reset-Ack packet.  But RFC 1962
+				 * requires it.  Hopefully, M$ does send a Reset-Ack; even
+				 * though it isn't required for MPPE synchronization, it is
+				 * required to reset CCP state.
+				 */
+			}
+		}
+		if (flushed)
+			mppe_rekey(state, 0);
+	}
+
+	/*
+	 * Fill in the first part of the PPP header.  The protocol field
+	 * comes from the decrypted data.
+	 */
+	obuf[0] = PPP_ADDRESS(ibuf);	/* +1 */
+	obuf[1] = PPP_CONTROL(ibuf);	/* +1 */
+	obuf += 2;
+	ibuf += PPP_HDRLEN + MPPE_OVHD;
+	isize -= PPP_HDRLEN + MPPE_OVHD;	/* -6 */
+	/* net osize: isize-4 */
+
+	/*
+	 * Decrypt the first byte in order to check if it is
+	 * a compressed or uncompressed protocol field.
+	 */
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 1);
+	setup_sg(sg_in, ibuf, 1);
+	setup_sg(sg_out, obuf, 1);
+
+	skcipher_request_set_tfm(req, state->arc4);
+	skcipher_request_set_callback(req, 0, NULL, NULL);
+	skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
+	if (crypto_skcipher_decrypt(req)) {
+		printk(KERN_DEBUG "crypto_skcipher_decrypt failed\n");
+		osize = DECOMP_ERROR;
+		goto out_zap_req;
+	}
+
+	/*
+	 * Do PFC decompression.
+	 * This would be nicer if we were given the actual sk_buff
+	 * instead of a char *.
+	 */
+	if ((obuf[0] & 0x01) != 0) {
+		obuf[1] = obuf[0];
+		obuf[0] = 0;
+		obuf++;
+		osize++;
+	}
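+
+	/*
+	 * Example (illustrative): IP uses PPP protocol 0x0021; with PFC the
+	 * peer sends just the odd byte 0x21, which the block above expands
+	 * back to the two-byte form 0x00 0x21.
+	 */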
+
+	/* And finally, decrypt the rest of the packet. */
+	setup_sg(sg_in, ibuf + 1, isize - 1);
+	setup_sg(sg_out, obuf + 1, osize - 1);
+	skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
+	if (crypto_skcipher_decrypt(req)) {
+		printk(KERN_DEBUG "crypto_skcipher_decrypt failed\n");
+		osize = DECOMP_ERROR;
+		goto out_zap_req;
+	}
+
+	state->stats.unc_bytes += osize;
+	state->stats.unc_packets++;
+	state->stats.comp_bytes += isize;
+	state->stats.comp_packets++;
+
+	/* good packet credit */
+	state->sanity_errors >>= 1;
+
+out_zap_req:
+	skcipher_request_zero(req);
+	return osize;
+
+sanity_error:
+	if (state->sanity_errors < SANITY_MAX)
+		return DECOMP_ERROR;
+	else
+		/* Take LCP down if the peer is sending too many bogons.
+		 * We don't want to do this for a single or just a few
+		 * instances since it could just be due to packet corruption.
+		 * Hard failures above add 100 each, and every good packet
+		 * halves the count, so this only trips after roughly 16
+		 * consecutive bad packets (16 * 100 == SANITY_MAX).
+		 */
+		return DECOMP_FATALERROR;
+}
+
+/*
+ * Incompressible data has arrived (this should never happen!).
+ * We should probably drop the link if the protocol is in the range
+ * of what should be encrypted.  At the least, we should drop this
+ * packet.  (How to do this?)
+ */
+static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt)
+{
+	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+
+	if (state->debug &&
+	    (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa))
+		printk(KERN_DEBUG
+		       "mppe_incomp[%d]: incompressible (unencrypted) data! "
+		       "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf));
+
+	state->stats.inc_bytes += icnt;
+	state->stats.inc_packets++;
+	state->stats.unc_bytes += icnt;
+	state->stats.unc_packets++;
+}
+
+/*************************************************************
+ * Module interface table
+ *************************************************************/
+
+/*
+ * Procedures exported to if_ppp.c.
+ */
+static struct compressor ppp_mppe = {
+	.compress_proto = CI_MPPE,
+	.comp_alloc     = mppe_alloc,
+	.comp_free      = mppe_free,
+	.comp_init      = mppe_comp_init,
+	.comp_reset     = mppe_comp_reset,
+	.compress       = mppe_compress,
+	.comp_stat      = mppe_comp_stats,
+	.decomp_alloc   = mppe_alloc,
+	.decomp_free    = mppe_free,
+	.decomp_init    = mppe_decomp_init,
+	.decomp_reset   = mppe_decomp_reset,
+	.decompress     = mppe_decompress,
+	.incomp         = mppe_incomp,
+	.decomp_stat    = mppe_comp_stats,
+	.owner          = THIS_MODULE,
+	.comp_extra     = MPPE_PAD,
+};
+
+/*
+ * ppp_mppe_init()
+ *
+ * Prior to allowing load, try to load the arc4 and sha1 crypto
+ * algorithms.  The transforms themselves are allocated later, in
+ * mppe_alloc(), but this way the module will fail to insmod if they
+ * aren't available.
+ */
+
+static int __init ppp_mppe_init(void)
+{
+	int answer;
+	if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+	      crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
+		return -ENODEV;
+
+	sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
+	if (!sha_pad)
+		return -ENOMEM;
+	sha_pad_init(sha_pad);
+
+	answer = ppp_register_compressor(&ppp_mppe);
+
+	if (answer == 0)
+		printk(KERN_INFO "PPP MPPE Compression module registered\n");
+	else
+		kfree(sha_pad);
+
+	return answer;
+}
+
+static void __exit ppp_mppe_cleanup(void)
+{
+	ppp_unregister_compressor(&ppp_mppe);
+	kfree(sha_pad);
+}
+
+module_init(ppp_mppe_init);
+module_exit(ppp_mppe_cleanup);
diff --git a/drivers/net/ppp/ppp_mppe.h b/drivers/net/ppp/ppp_mppe.h
new file mode 100644
index 0000000..677b3b3
--- /dev/null
+++ b/drivers/net/ppp/ppp_mppe.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define MPPE_PAD                4      /* MPPE growth per frame */
+#define MPPE_MAX_KEY_LEN       16      /* largest key length (128-bit) */
+
+/* option bits for ccp_options.mppe */
+#define MPPE_OPT_40            0x01    /* 40 bit */
+#define MPPE_OPT_128           0x02    /* 128 bit */
+#define MPPE_OPT_STATEFUL      0x04    /* stateful mode */
+/* unsupported opts */
+#define MPPE_OPT_56            0x08    /* 56 bit */
+#define MPPE_OPT_MPPC          0x10    /* MPPC compression */
+#define MPPE_OPT_D             0x20    /* Unknown */
+#define MPPE_OPT_UNSUPPORTED (MPPE_OPT_56|MPPE_OPT_MPPC|MPPE_OPT_D)
+#define MPPE_OPT_UNKNOWN       0x40    /* Bits !defined in RFC 3078 were set */
+
+/*
+ * This is not nice ... the alternative is a bitfield struct though.
+ * And unfortunately, we cannot share the same bits for the option
+ * names above since C and H are the same bit.  We could do a u_int32
+ * but then we have to do a htonl() all the time and/or we still need
+ * to know which octet is which.
+ */
+#define MPPE_C_BIT             0x01    /* MPPC */
+#define MPPE_D_BIT             0x10    /* Obsolete, usage unknown */
+#define MPPE_L_BIT             0x20    /* 40-bit */
+#define MPPE_S_BIT             0x40    /* 128-bit */
+#define MPPE_M_BIT             0x80    /* 56-bit, not supported */
+#define MPPE_H_BIT             0x01    /* Stateless (in a different byte) */
+
+/* Does not include H bit; used for least significant octet only. */
+#define MPPE_ALL_BITS (MPPE_D_BIT|MPPE_L_BIT|MPPE_S_BIT|MPPE_M_BIT|MPPE_H_BIT)
+
+/* Build a CI from mppe opts (see RFC 3078) */
+#define MPPE_OPTS_TO_CI(opts, ci)              \
+    do {                                       \
+       u_char *ptr = ci; /* u_char[4] */       \
+                                               \
+       /* H bit */                             \
+       if (opts & MPPE_OPT_STATEFUL)           \
+           *ptr++ = 0x0;                       \
+       else                                    \
+           *ptr++ = MPPE_H_BIT;                \
+       *ptr++ = 0;                             \
+       *ptr++ = 0;                             \
+                                               \
+       /* S,L bits */                          \
+       *ptr = 0;                               \
+       if (opts & MPPE_OPT_128)                \
+           *ptr |= MPPE_S_BIT;                 \
+       if (opts & MPPE_OPT_40)                 \
+           *ptr |= MPPE_L_BIT;                 \
+       /* M,D,C bits not supported */          \
+    } while (/* CONSTCOND */ 0)
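+
+/*
+ * Worked example (illustrative): opts = MPPE_OPT_128 in stateless mode
+ * (MPPE_OPT_STATEFUL clear) yields ci[] = { 0x01, 0x00, 0x00, 0x40 }:
+ * the H bit in the first octet, the S bit in the last.
+ */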
+
+/* The reverse of the above */
+#define MPPE_CI_TO_OPTS(ci, opts)              \
+    do {                                       \
+       u_char *ptr = ci; /* u_char[4] */       \
+                                               \
+       opts = 0;                               \
+                                               \
+       /* H bit */                             \
+       if (!(ptr[0] & MPPE_H_BIT))             \
+           opts |= MPPE_OPT_STATEFUL;          \
+                                               \
+       /* S,L bits */                          \
+       if (ptr[3] & MPPE_S_BIT)                \
+           opts |= MPPE_OPT_128;               \
+       if (ptr[3] & MPPE_L_BIT)                \
+           opts |= MPPE_OPT_40;                \
+                                               \
+       /* M,D,C bits */                        \
+       if (ptr[3] & MPPE_M_BIT)                \
+           opts |= MPPE_OPT_56;                \
+       if (ptr[3] & MPPE_D_BIT)                \
+           opts |= MPPE_OPT_D;                 \
+       if (ptr[3] & MPPE_C_BIT)                \
+           opts |= MPPE_OPT_MPPC;              \
+                                               \
+       /* Other bits */                        \
+       if (ptr[0] & ~MPPE_H_BIT)               \
+           opts |= MPPE_OPT_UNKNOWN;           \
+       if (ptr[1] || ptr[2])                   \
+           opts |= MPPE_OPT_UNKNOWN;           \
+       if (ptr[3] & ~MPPE_ALL_BITS)            \
+           opts |= MPPE_OPT_UNKNOWN;           \
+    } while (/* CONSTCOND */ 0)
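+
+/*
+ * Worked example for the decode (illustrative): ci[] = { 0x00, 0x00, 0x00,
+ * 0x60 } yields MPPE_OPT_STATEFUL | MPPE_OPT_128 | MPPE_OPT_40 (H clear,
+ * S and L set); any bit outside MPPE_ALL_BITS would add MPPE_OPT_UNKNOWN.
+ */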
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
new file mode 100644
index 0000000..047f6c6
--- /dev/null
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -0,0 +1,741 @@
+/*
+ * PPP synchronous tty channel driver for Linux.
+ *
+ * This is a ppp channel driver that can be used with tty device drivers
+ * that are frame oriented, such as synchronous HDLC devices.
+ *
+ * Complete PPP frames without encoding/decoding are exchanged between
+ * the channel driver and the device driver.
+ *
+ * The async map IOCTL codes are implemented to keep the user mode
+ * applications happy if they call them. Synchronous PPP does not use
+ * the async maps.
+ *
+ * Copyright 1999 Paul Mackerras.
+ *
+ * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * This driver provides the encapsulation and framing for sending
+ * and receiving PPP frames over sync serial lines.  It relies on
+ * the generic PPP layer to give it frames to send and to process
+ * received frames.  It implements the PPP line discipline.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ *
+ * ==FILEVERSION 20040616==
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/ppp_channel.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/refcount.h>
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+
+#define PPP_VERSION	"2.4.2"
+
+/* Structure for storing local state. */
+struct syncppp {
+	struct tty_struct *tty;
+	unsigned int	flags;
+	unsigned int	rbits;
+	int		mru;
+	spinlock_t	xmit_lock;
+	spinlock_t	recv_lock;
+	unsigned long	xmit_flags;
+	u32		xaccm[8];
+	u32		raccm;
+	unsigned int	bytes_sent;
+	unsigned int	bytes_rcvd;
+
+	struct sk_buff	*tpkt;
+	unsigned long	last_xmit;
+
+	struct sk_buff_head rqueue;
+
+	struct tasklet_struct tsk;
+
+	refcount_t	refcnt;
+	struct completion dead_cmp;
+	struct ppp_channel chan;	/* interface to generic ppp layer */
+};
+
+/* Bit numbers in xmit_flags */
+#define XMIT_WAKEUP	0
+#define XMIT_FULL	1
+
+/* Bits in rbits */
+#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define PPPSYNC_MAX_RQLEN	32	/* arbitrary */
+
+/*
+ * Prototypes.
+ */
+static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
+static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
+static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
+			  unsigned long arg);
+static void ppp_sync_process(unsigned long arg);
+static int ppp_sync_push(struct syncppp *ap);
+static void ppp_sync_flush_output(struct syncppp *ap);
+static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+			   char *flags, int count);
+
+static const struct ppp_channel_ops sync_ops = {
+	.start_xmit = ppp_sync_send,
+	.ioctl      = ppp_sync_ioctl,
+};
+
+/*
+ * Utility procedure to print a buffer in hex/ascii
+ */
+static void
+ppp_print_buffer (const char *name, const __u8 *buf, int count)
+{
+	if (name != NULL)
+		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
+
+	print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
+}
+
+
+/*
+ * Routines implementing the synchronous PPP line discipline.
+ */
+
+/*
+ * We have a potential race on dereferencing tty->disc_data,
+ * because the tty layer provides no locking at all - thus one
+ * cpu could be running ppp_synctty_receive while another
+ * calls ppp_synctty_close, which zeroes tty->disc_data and
+ * frees the memory that ppp_synctty_receive is using.  The best
+ * way to fix this is to use a rwlock in the tty struct, but for now
+ * we use a single global rwlock for all ttys in ppp line discipline.
+ *
+ * FIXME: Fixed in tty_io nowadays.
+ */
+static DEFINE_RWLOCK(disc_data_lock);
+
+static struct syncppp *sp_get(struct tty_struct *tty)
+{
+	struct syncppp *ap;
+
+	read_lock(&disc_data_lock);
+	ap = tty->disc_data;
+	if (ap != NULL)
+		refcount_inc(&ap->refcnt);
+	read_unlock(&disc_data_lock);
+	return ap;
+}
+
+static void sp_put(struct syncppp *ap)
+{
+	if (refcount_dec_and_test(&ap->refcnt))
+		complete(&ap->dead_cmp);
+}
+
+/*
+ * Called when a tty is put into sync-PPP line discipline.
+ */
+static int
+ppp_sync_open(struct tty_struct *tty)
+{
+	struct syncppp *ap;
+	int err;
+	int speed;
+
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!ap)
+		goto out;
+
+	/* initialize the syncppp structure */
+	ap->tty = tty;
+	ap->mru = PPP_MRU;
+	spin_lock_init(&ap->xmit_lock);
+	spin_lock_init(&ap->recv_lock);
+	ap->xaccm[0] = ~0U;
+	ap->xaccm[3] = 0x60000000U;
+	ap->raccm = ~0U;
+
+	skb_queue_head_init(&ap->rqueue);
+	tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
+
+	refcount_set(&ap->refcnt, 1);
+	init_completion(&ap->dead_cmp);
+
+	ap->chan.private = ap;
+	ap->chan.ops = &sync_ops;
+	ap->chan.mtu = PPP_MRU;
+	ap->chan.hdrlen = 2;	/* for A/C bytes */
+	speed = tty_get_baud_rate(tty);
+	ap->chan.speed = speed;
+	err = ppp_register_channel(&ap->chan);
+	if (err)
+		goto out_free;
+
+	tty->disc_data = ap;
+	tty->receive_room = 65536;
+	return 0;
+
+ out_free:
+	kfree(ap);
+ out:
+	return err;
+}
+
+/*
+ * Called when the tty is put into another line discipline
+ * or it hangs up.  We have to wait for any cpu currently
+ * executing in any of the other ppp_synctty_* routines to
+ * finish before we can call ppp_unregister_channel and free
+ * the syncppp struct.  This routine must be called from
+ * process context, not interrupt or softirq context.
+ */
+static void
+ppp_sync_close(struct tty_struct *tty)
+{
+	struct syncppp *ap;
+
+	write_lock_irq(&disc_data_lock);
+	ap = tty->disc_data;
+	tty->disc_data = NULL;
+	write_unlock_irq(&disc_data_lock);
+	if (!ap)
+		return;
+
+	/*
+	 * We have now ensured that nobody can start using ap from now
+	 * on, but we have to wait for all existing users to finish.
+	 * Note that ppp_unregister_channel ensures that no calls to
+	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
+	 * by the time it returns.
+	 */
+	if (!refcount_dec_and_test(&ap->refcnt))
+		wait_for_completion(&ap->dead_cmp);
+	tasklet_kill(&ap->tsk);
+
+	ppp_unregister_channel(&ap->chan);
+	skb_queue_purge(&ap->rqueue);
+	kfree_skb(ap->tpkt);
+	kfree(ap);
+}
+
+/*
+ * Called on tty hangup in process context.
+ *
+ * Wait for I/O to driver to complete and unregister PPP channel.
+ * This is already done by the close routine, so just call that.
+ */
+static int ppp_sync_hangup(struct tty_struct *tty)
+{
+	ppp_sync_close(tty);
+	return 0;
+}
+
+/*
+ * Read does nothing - no data is ever available this way.
+ * Pppd reads and writes packets via /dev/ppp instead.
+ */
+static ssize_t
+ppp_sync_read(struct tty_struct *tty, struct file *file,
+	       unsigned char __user *buf, size_t count)
+{
+	return -EAGAIN;
+}
+
+/*
+ * Write on the tty does nothing, the packets all come in
+ * from the ppp generic stuff.
+ */
+static ssize_t
+ppp_sync_write(struct tty_struct *tty, struct file *file,
+		const unsigned char *buf, size_t count)
+{
+	return -EAGAIN;
+}
+
+static int
+ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
+		  unsigned int cmd, unsigned long arg)
+{
+	struct syncppp *ap = sp_get(tty);
+	int __user *p = (int __user *)arg;
+	int err, val;
+
+	if (!ap)
+		return -ENXIO;
+	err = -EFAULT;
+	switch (cmd) {
+	case PPPIOCGCHAN:
+		err = -EFAULT;
+		if (put_user(ppp_channel_index(&ap->chan), p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGUNIT:
+		err = -EFAULT;
+		if (put_user(ppp_unit_number(&ap->chan), p))
+			break;
+		err = 0;
+		break;
+
+	case TCFLSH:
+		/* flush our buffers and the serial port's buffer */
+		if (arg == TCIOFLUSH || arg == TCOFLUSH)
+			ppp_sync_flush_output(ap);
+		err = n_tty_ioctl_helper(tty, file, cmd, arg);
+		break;
+
+	case FIONREAD:
+		val = 0;
+		if (put_user(val, p))
+			break;
+		err = 0;
+		break;
+
+	default:
+		err = tty_mode_ioctl(tty, file, cmd, arg);
+		break;
+	}
+
+	sp_put(ap);
+	return err;
+}
+
+/* No kernel lock - fine */
+static __poll_t
+ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
+{
+	return 0;
+}
+
+/* May sleep, don't call from interrupt level or with interrupts disabled */
+static void
+ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
+		  char *cflags, int count)
+{
+	struct syncppp *ap = sp_get(tty);
+	unsigned long flags;
+
+	if (!ap)
+		return;
+	spin_lock_irqsave(&ap->recv_lock, flags);
+	ppp_sync_input(ap, buf, cflags, count);
+	spin_unlock_irqrestore(&ap->recv_lock, flags);
+	if (!skb_queue_empty(&ap->rqueue))
+		tasklet_schedule(&ap->tsk);
+	sp_put(ap);
+	tty_unthrottle(tty);
+}
+
+static void
+ppp_sync_wakeup(struct tty_struct *tty)
+{
+	struct syncppp *ap = sp_get(tty);
+
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	if (!ap)
+		return;
+	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
+	tasklet_schedule(&ap->tsk);
+	sp_put(ap);
+}
+
+
+static struct tty_ldisc_ops ppp_sync_ldisc = {
+	.owner	= THIS_MODULE,
+	.magic	= TTY_LDISC_MAGIC,
+	.name	= "pppsync",
+	.open	= ppp_sync_open,
+	.close	= ppp_sync_close,
+	.hangup	= ppp_sync_hangup,
+	.read	= ppp_sync_read,
+	.write	= ppp_sync_write,
+	.ioctl	= ppp_synctty_ioctl,
+	.poll	= ppp_sync_poll,
+	.receive_buf = ppp_sync_receive,
+	.write_wakeup = ppp_sync_wakeup,
+};
+
+static int __init
+ppp_sync_init(void)
+{
+	int err;
+
+	err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
+	if (err != 0)
+		printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
+		       err);
+	return err;
+}
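+
+/*
+ * For reference, a userspace sketch (illustrative, not part of this
+ * driver): pppd or a similar daemon attaches this line discipline to an
+ * already-open synchronous tty with
+ *
+ *	int ldisc = N_SYNC_PPP;
+ *	ioctl(tty_fd, TIOCSETD, &ldisc);
+ *
+ * after which frames flow through ppp_sync_send()/ppp_sync_receive().
+ */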
+
+/*
+ * The following routines provide the PPP channel interface.
+ */
+static int
+ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+{
+	struct syncppp *ap = chan->private;
+	int err, val;
+	u32 accm[8];
+	void __user *argp = (void __user *)arg;
+	u32 __user *p = argp;
+
+	err = -EFAULT;
+	switch (cmd) {
+	case PPPIOCGFLAGS:
+		val = ap->flags | ap->rbits;
+		if (put_user(val, (int __user *) argp))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSFLAGS:
+		if (get_user(val, (int __user *) argp))
+			break;
+		ap->flags = val & ~SC_RCV_BITS;
+		spin_lock_irq(&ap->recv_lock);
+		ap->rbits = val & SC_RCV_BITS;
+		spin_unlock_irq(&ap->recv_lock);
+		err = 0;
+		break;
+
+	case PPPIOCGASYNCMAP:
+		if (put_user(ap->xaccm[0], p))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSASYNCMAP:
+		if (get_user(ap->xaccm[0], p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGRASYNCMAP:
+		if (put_user(ap->raccm, p))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSRASYNCMAP:
+		if (get_user(ap->raccm, p))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCGXASYNCMAP:
+		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSXASYNCMAP:
+		if (copy_from_user(accm, argp, sizeof(accm)))
+			break;
+		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
+		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
+		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
+		err = 0;
+		break;
+
+	case PPPIOCGMRU:
+		if (put_user(ap->mru, (int __user *) argp))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSMRU:
+		if (get_user(val, (int __user *) argp))
+			break;
+		if (val < PPP_MRU)
+			val = PPP_MRU;
+		ap->mru = val;
+		err = 0;
+		break;
+
+	default:
+		err = -ENOTTY;
+	}
+	return err;
+}
+
+/*
+ * This is called at softirq level to deliver received packets
+ * to the ppp_generic code, and to tell the ppp_generic code
+ * if we can accept more output now.
+ */
+static void ppp_sync_process(unsigned long arg)
+{
+	struct syncppp *ap = (struct syncppp *) arg;
+	struct sk_buff *skb;
+
+	/* process received packets */
+	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
+		if (skb->len == 0) {
+			/* zero length buffers indicate error */
+			ppp_input_error(&ap->chan, 0);
+			kfree_skb(skb);
+		}
+		else
+			ppp_input(&ap->chan, skb);
+	}
+
+	/* try to push more stuff out */
+	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
+		ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Procedures for encapsulation and framing.
+ */
+
+static struct sk_buff*
+ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
+{
+	int proto;
+	unsigned char *data;
+	int islcp;
+
+	data  = skb->data;
+	proto = get_unaligned_be16(data);
+
+	/* LCP packets with codes between 1 (configure-request)
+	 * and 7 (code-reject) must be sent as though no options
+	 * have been negotiated.
+	 */
+	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+	/* compress protocol field if option enabled */
+	if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
+		skb_pull(skb,1);
+
+	/* prepend address/control fields if necessary */
+	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
+		if (skb_headroom(skb) < 2) {
+			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
+			if (npkt == NULL) {
+				kfree_skb(skb);
+				return NULL;
+			}
+			skb_reserve(npkt,2);
+			skb_copy_from_linear_data(skb,
+				      skb_put(npkt, skb->len), skb->len);
+			consume_skb(skb);
+			skb = npkt;
+		}
+		skb_push(skb,2);
+		skb->data[0] = PPP_ALLSTATIONS;
+		skb->data[1] = PPP_UI;
+	}
+
+	ap->last_xmit = jiffies;
+
+	if (skb && ap->flags & SC_LOG_OUTPKT)
+		ppp_print_buffer ("send buffer", skb->data, skb->len);
+
+	return skb;
+}
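+
+/*
+ * Illustrative before/after for the munging above: with SC_COMP_PROT and
+ * SC_COMP_AC both negotiated, an IP frame arriving as 00 21 45 ... is
+ * sent as 21 45 ... (protocol field compressed, no address/control
+ * prepended), while an LCP Configure-Request always goes out in full as
+ * ff 03 c0 21 01 ..., since islcp forces both compressions off.
+ */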
+
+/*
+ * Transmit-side routines.
+ */
+
+/*
+ * Send a packet to the peer over a sync tty line.
+ * Returns 1 iff the packet was accepted.
+ * If the packet was not accepted, we will call ppp_output_wakeup
+ * at some later time.
+ */
+static int
+ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct syncppp *ap = chan->private;
+
+	ppp_sync_push(ap);
+
+	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
+		return 0;	/* already full */
+	skb = ppp_sync_txmunge(ap, skb);
+	if (skb != NULL)
+		ap->tpkt = skb;
+	else
+		clear_bit(XMIT_FULL, &ap->xmit_flags);
+
+	ppp_sync_push(ap);
+	return 1;
+}
+
+/*
+ * Push as much data as possible out to the tty.
+ */
+static int
+ppp_sync_push(struct syncppp *ap)
+{
+	int sent, done = 0;
+	struct tty_struct *tty = ap->tty;
+	int tty_stuffed = 0;
+
+	if (!spin_trylock_bh(&ap->xmit_lock))
+		return 0;
+	for (;;) {
+		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
+			tty_stuffed = 0;
+		if (!tty_stuffed && ap->tpkt) {
+			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+			sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
+			if (sent < 0)
+				goto flush;	/* error, e.g. loss of CD */
+			if (sent < ap->tpkt->len) {
+				tty_stuffed = 1;
+			} else {
+				consume_skb(ap->tpkt);
+				ap->tpkt = NULL;
+				clear_bit(XMIT_FULL, &ap->xmit_flags);
+				done = 1;
+			}
+			continue;
+		}
+		/* haven't made any progress */
+		spin_unlock_bh(&ap->xmit_lock);
+		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
+		      (!tty_stuffed && ap->tpkt)))
+			break;
+		if (!spin_trylock_bh(&ap->xmit_lock))
+			break;
+	}
+	return done;
+
+flush:
+	if (ap->tpkt) {
+		kfree_skb(ap->tpkt);
+		ap->tpkt = NULL;
+		clear_bit(XMIT_FULL, &ap->xmit_flags);
+		done = 1;
+	}
+	spin_unlock_bh(&ap->xmit_lock);
+	return done;
+}
+
+/*
+ * Flush output from our internal buffers.
+ * Called for the TCFLSH ioctl.
+ */
+static void
+ppp_sync_flush_output(struct syncppp *ap)
+{
+	int done = 0;
+
+	spin_lock_bh(&ap->xmit_lock);
+	if (ap->tpkt != NULL) {
+		kfree_skb(ap->tpkt);
+		ap->tpkt = NULL;
+		clear_bit(XMIT_FULL, &ap->xmit_flags);
+		done = 1;
+	}
+	spin_unlock_bh(&ap->xmit_lock);
+	if (done)
+		ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Receive-side routines.
+ */
+
+/* called when the tty driver has data for us.
+ *
+ * Data is frame oriented: each call to ppp_sync_input is considered
+ * a whole frame. If the 1st flag byte is non-zero then the whole
+ * frame is considered to be in error and is tossed.
+ */
+static void
+ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+		char *flags, int count)
+{
+	struct sk_buff *skb;
+	unsigned char *p;
+
+	if (count == 0)
+		return;
+
+	if (ap->flags & SC_LOG_INPKT)
+		ppp_print_buffer ("receive buffer", buf, count);
+
+	/* stuff the chars in the skb */
+	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
+	if (!skb) {
+		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
+		goto err;
+	}
+	/* Try to get the payload 4-byte aligned */
+	if (buf[0] != PPP_ALLSTATIONS)
+		skb_reserve(skb, 2 + (buf[0] & 1));
+
+	if (flags && *flags) {
+		/* error flag set, ignore frame */
+		goto err;
+	} else if (count > skb_tailroom(skb)) {
+		/* packet overflowed MRU */
+		goto err;
+	}
+
+	skb_put_data(skb, buf, count);
+
+	/* strip address/control field if present */
+	p = skb->data;
+	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+		/* chop off address/control */
+		if (skb->len < 3)
+			goto err;
+		p = skb_pull(skb, 2);
+	}
+
+	/* decompress protocol field if compressed */
+	if (p[0] & 1) {
+		/* protocol is compressed */
+		*(u8 *)skb_push(skb, 1) = 0;
+	} else if (skb->len < 2)
+		goto err;
+
+	/* queue the frame to be processed */
+	skb_queue_tail(&ap->rqueue, skb);
+	return;
+
+err:
+	/* queue zero length packet as error indication */
+	if (skb || (skb = dev_alloc_skb(0))) {
+		skb_trim(skb, 0);
+		skb_queue_tail(&ap->rqueue, skb);
+	}
+}
+
+static void __exit
+ppp_sync_cleanup(void)
+{
+	if (tty_unregister_ldisc(N_SYNC_PPP) != 0)
+		printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
+}
+
+module_init(ppp_sync_init);
+module_exit(ppp_sync_cleanup);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_SYNC_PPP);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
new file mode 100644
index 0000000..62dc564
--- /dev/null
+++ b/drivers/net/ppp/pppoe.c
@@ -0,0 +1,1205 @@
+/** -*- linux-c -*- ***********************************************************
+ * Linux PPP over Ethernet (PPPoX/PPPoE) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoE --- PPP over Ethernet (RFC 2516)
+ *
+ *
+ * Version:	0.7.0
+ *
+ * 070228 :	Fix to allow multiple sessions with same remote MAC and same
+ *		session id by including the local device ifindex in the
+ *		tuple identifying a session. This also ensures packets can't
+ *		be injected into a session from interfaces other than the one
+ *		specified by userspace. Florian Zumbiehl <florz@florz.de>
+ *		(Oh, BTW, this one is YYMMDD, in case you were wondering ...)
+ * 220102 :	Fix module use count on failure in pppoe_create, pppox_sk -acme
+ * 030700 :	Fixed connect logic to allow for disconnect.
+ * 270700 :	Fixed potential SMP problems; we must protect against
+ *		simultaneous invocation of ppp_input
+ *		and ppp_unregister_channel.
+ * 040800 :	Respect reference count mechanisms on net-devices.
+ * 200800 :	fix kfree(skb) in pppoe_rcv (acme)
+ *		Module reference count is decremented in the right spot now,
+ *		guards against sock_put not actually freeing the sk
+ *		in pppoe_release.
+ * 051000 :	Initialization cleanup.
+ * 111100 :	Fix recvmsg.
+ * 050101 :	Fix PADT processing.
+ * 140501 :	Use pppoe_rcv_core to handle all backlog. (Alexey)
+ * 170701 :	Do not lock_sock with rwlock held. (DaveM)
+ *		Ignore discovery frames if user has socket
+ *		locked. (DaveM)
+ *		Ignore return value of dev_queue_xmit in __pppoe_xmit
+ *		or else we may kfree an SKB twice. (DaveM)
+ * 190701 :	When doing copies of skb's in __pppoe_xmit, always delete
+ *		the original skb that was passed in on success, never on
+ *		failure.  Delete the copy of the skb on failure to avoid
+ *		a memory leak.
+ * 081001 :	Misc. cleanup (licence string, non-blocking, prevent
+ *		reference of device on close).
+ * 121301 :	New ppp channels interface; cannot unregister a channel
+ *		from interrupts.  Thus, we mark the socket as a ZOMBIE
+ *		and do the unregistration later.
+ * 081002 :	seq_file support for proc stuff -acme
+ * 111602 :	Merge all 2.4 fixes into 2.5/2.6 tree.  Label 2.5/2.6
+ *		as version 0.7.  Spacing cleanup.
+ * Author:	Michal Ostrowski <mostrows@speakeasy.net>
+ * Contributors:
+ * 		Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *		David S. Miller (davem@redhat.com)
+ *
+ * License:
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/if_ether.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/notifier.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/sock.h>
+
+#include <linux/uaccess.h>
+
+#define PPPOE_HASH_BITS 4
+#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
+#define PPPOE_HASH_MASK	(PPPOE_HASH_SIZE - 1)
+
+static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
+
+static const struct proto_ops pppoe_ops;
+static const struct ppp_channel_ops pppoe_chan_ops;
+
+/* per-net private data for this module */
+static unsigned int pppoe_net_id __read_mostly;
+struct pppoe_net {
+	/*
+	 * We could use a _single_ hash table for all nets by mixing the
+	 * net id into the hash, but that would lengthen the hash chains
+	 * and add extra comparisons; keeping a table per net is simpler
+	 * and, under SMP, means less lock contention.
+	 */
+	struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
+	rwlock_t hash_lock;
+};
+
+/*
+ * PPPoE could be in the following stages:
+ * 1) Discovery stage (to obtain remote MAC and Session ID)
+ * 2) Session stage (MAC and SID are known)
+ *
+ * Ethernet frames have a special tag for this but
+ * we use simpler approach based on session id
+ */
+static inline bool stage_session(__be16 sid)
+{
+	return sid != 0;
+}
+
+static inline struct pppoe_net *pppoe_pernet(struct net *net)
+{
+	BUG_ON(!net);
+
+	return net_generic(net, pppoe_net_id);
+}
+
+static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
+{
+	return a->sid == b->sid && ether_addr_equal(a->remote, b->remote);
+}
+
+static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
+{
+	return a->sid == sid && ether_addr_equal(a->remote, addr);
+}
+
+#if 8 % PPPOE_HASH_BITS
+#error 8 must be a multiple of PPPOE_HASH_BITS
+#endif
+
+static int hash_item(__be16 sid, unsigned char *addr)
+{
+	unsigned char hash = 0;
+	unsigned int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hash ^= addr[i];
+	for (i = 0; i < sizeof(sid_t) * 8; i += 8)
+		hash ^= (__force __u32)sid >> i;
+	for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
+		hash ^= hash >> i;
+
+	return hash & PPPOE_HASH_MASK;
+}
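+
+/*
+ * hash_item() XORs the six MAC octets with both octets of the session id,
+ * then folds the byte down to PPPOE_HASH_BITS bits.  E.g. (illustrative)
+ * the MAC 00:11:22:33:44:55 alone XORs to 0x11; one hash ^= hash >> 4
+ * step plus the mask leaves the low 4 bits as the bucket index.
+ */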
+
+/**********************************************************************
+ *
+ *  Set/get/delete/rehash items  (internal versions)
+ *
+ **********************************************************************/
+static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
+				unsigned char *addr, int ifindex)
+{
+	int hash = hash_item(sid, addr);
+	struct pppox_sock *ret;
+
+	ret = pn->hash_table[hash];
+	while (ret) {
+		if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
+		    ret->pppoe_ifindex == ifindex)
+			return ret;
+
+		ret = ret->next;
+	}
+
+	return NULL;
+}
+
+static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
+{
+	int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+	struct pppox_sock *ret;
+
+	ret = pn->hash_table[hash];
+	while (ret) {
+		if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
+		    ret->pppoe_ifindex == po->pppoe_ifindex)
+			return -EALREADY;
+
+		ret = ret->next;
+	}
+
+	po->next = pn->hash_table[hash];
+	pn->hash_table[hash] = po;
+
+	return 0;
+}
+
+static void __delete_item(struct pppoe_net *pn, __be16 sid,
+					char *addr, int ifindex)
+{
+	int hash = hash_item(sid, addr);
+	struct pppox_sock *ret, **src;
+
+	ret = pn->hash_table[hash];
+	src = &pn->hash_table[hash];
+
+	while (ret) {
+		if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
+		    ret->pppoe_ifindex == ifindex) {
+			*src = ret->next;
+			break;
+		}
+
+		src = &ret->next;
+		ret = ret->next;
+	}
+}
+
+/**********************************************************************
+ *
+ *  Set/get/delete/rehash items
+ *
+ **********************************************************************/
+static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
+					unsigned char *addr, int ifindex)
+{
+	struct pppox_sock *po;
+
+	read_lock_bh(&pn->hash_lock);
+	po = __get_item(pn, sid, addr, ifindex);
+	if (po)
+		sock_hold(sk_pppox(po));
+	read_unlock_bh(&pn->hash_lock);
+
+	return po;
+}
+
+static inline struct pppox_sock *get_item_by_addr(struct net *net,
+						struct sockaddr_pppox *sp)
+{
+	struct net_device *dev;
+	struct pppoe_net *pn;
+	struct pppox_sock *pppox_sock = NULL;
+
+	int ifindex;
+
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
+	if (dev) {
+		ifindex = dev->ifindex;
+		pn = pppoe_pernet(net);
+		pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
+				sp->sa_addr.pppoe.remote, ifindex);
+	}
+	rcu_read_unlock();
+	return pppox_sock;
+}
+
+static inline void delete_item(struct pppoe_net *pn, __be16 sid,
+					char *addr, int ifindex)
+{
+	write_lock_bh(&pn->hash_lock);
+	__delete_item(pn, sid, addr, ifindex);
+	write_unlock_bh(&pn->hash_lock);
+}
+
+/***************************************************************************
+ *
+ *  Handler for device events.
+ *  Certain device events require that sockets be unconnected.
+ *
+ **************************************************************************/
+
+static void pppoe_flush_dev(struct net_device *dev)
+{
+	struct pppoe_net *pn;
+	int i;
+
+	pn = pppoe_pernet(dev_net(dev));
+	write_lock_bh(&pn->hash_lock);
+	for (i = 0; i < PPPOE_HASH_SIZE; i++) {
+		struct pppox_sock *po = pn->hash_table[i];
+		struct sock *sk;
+
+		while (po) {
+			while (po && po->pppoe_dev != dev) {
+				po = po->next;
+			}
+
+			if (!po)
+				break;
+
+			sk = sk_pppox(po);
+
+			/* We always grab the socket lock, followed by the
+			 * hash_lock, in that order.  Since we should hold the
+			 * sock lock while doing any unbinding, we need to
+			 * release the lock we're holding.  Hold a reference to
+			 * the sock so it doesn't disappear as we're jumping
+			 * between locks.
+			 */
+
+			sock_hold(sk);
+			write_unlock_bh(&pn->hash_lock);
+			lock_sock(sk);
+
+			if (po->pppoe_dev == dev &&
+			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+				pppox_unbind_sock(sk);
+				sk->sk_state_change(sk);
+				po->pppoe_dev = NULL;
+				dev_put(dev);
+			}
+
+			release_sock(sk);
+			sock_put(sk);
+
+			/* Restart the process from the start of the current
+			 * hash chain.  We dropped the locks, so the world
+			 * may have changed from underneath us.
+			 */
+
+			BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
+			write_lock_bh(&pn->hash_lock);
+			po = pn->hash_table[i];
+		}
+	}
+	write_unlock_bh(&pn->hash_lock);
+}
+
+static int pppoe_device_event(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	/* Only look at sockets that are using this specific device. */
+	switch (event) {
+	case NETDEV_CHANGEADDR:
+	case NETDEV_CHANGEMTU:
+		/* A change in mtu or address is a bad thing, requiring
+		 * LCP re-negotiation.
+		 */
+
+	case NETDEV_GOING_DOWN:
+	case NETDEV_DOWN:
+		/* Find every socket on this device and kill it. */
+		pppoe_flush_dev(dev);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pppoe_notifier = {
+	.notifier_call = pppoe_device_event,
+};
+
+/************************************************************************
+ *
+ * Do the real work of receiving a PPPoE Session frame.
+ *
+ ***********************************************************************/
+static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+{
+	struct pppox_sock *po = pppox_sk(sk);
+	struct pppox_sock *relay_po;
+
+	/* Backlog receive.  Backlog-receive semantics guarantee that no
+	 * other code runs inside the lock_sock()/release_sock() bounds
+	 * while we do, so sk->sk_state cannot change under us.
+	 */
+
+	if (skb->pkt_type == PACKET_OTHERHOST)
+		goto abort_kfree;
+
+	if (sk->sk_state & PPPOX_BOUND) {
+		ppp_input(&po->chan, skb);
+	} else if (sk->sk_state & PPPOX_RELAY) {
+		relay_po = get_item_by_addr(sock_net(sk),
+					    &po->pppoe_relay);
+		if (relay_po == NULL)
+			goto abort_kfree;
+
+		if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
+			goto abort_put;
+
+		if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+			goto abort_put;
+
+		sock_put(sk_pppox(relay_po));
+	} else {
+		if (sock_queue_rcv_skb(sk, skb))
+			goto abort_kfree;
+	}
+
+	return NET_RX_SUCCESS;
+
+abort_put:
+	sock_put(sk_pppox(relay_po));
+
+abort_kfree:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+/************************************************************************
+ *
+ * Receive wrapper called in BH context.
+ *
+ ***********************************************************************/
+static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
+		     struct packet_type *pt, struct net_device *orig_dev)
+{
+	struct pppoe_hdr *ph;
+	struct pppox_sock *po;
+	struct pppoe_net *pn;
+	int len;
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
+	if (skb_mac_header_len(skb) < ETH_HLEN)
+		goto drop;
+
+	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+		goto drop;
+
+	ph = pppoe_hdr(skb);
+	len = ntohs(ph->length);
+
+	skb_pull_rcsum(skb, sizeof(*ph));
+	if (skb->len < len)
+		goto drop;
+
+	if (pskb_trim_rcsum(skb, len))
+		goto drop;
+
+	pn = pppoe_pernet(dev_net(dev));
+
+	/* Note that get_item does a sock_hold(), so sk_pppox(po)
+	 * is known to be safe.
+	 */
+	po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+	if (!po)
+		goto drop;
+
+	return sk_receive_skb(sk_pppox(po), skb, 0);
+
+drop:
+	kfree_skb(skb);
+out:
+	return NET_RX_DROP;
+}
+
+static void pppoe_unbind_sock_work(struct work_struct *work)
+{
+	struct pppox_sock *po = container_of(work, struct pppox_sock,
+					     proto.pppoe.padt_work);
+	struct sock *sk = sk_pppox(po);
+
+	lock_sock(sk);
+	if (po->pppoe_dev) {
+		dev_put(po->pppoe_dev);
+		po->pppoe_dev = NULL;
+	}
+	pppox_unbind_sock(sk);
+	release_sock(sk);
+	sock_put(sk);
+}
+
+/************************************************************************
+ *
+ * Receive a PPPoE Discovery frame.
+ * This is solely for detection of PADT frames
+ *
+ ***********************************************************************/
+static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
+			  struct packet_type *pt, struct net_device *orig_dev)
+
+{
+	struct pppoe_hdr *ph;
+	struct pppox_sock *po;
+	struct pppoe_net *pn;
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
+	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+		goto abort;
+
+	ph = pppoe_hdr(skb);
+	if (ph->code != PADT_CODE)
+		goto abort;
+
+	pn = pppoe_pernet(dev_net(dev));
+	po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+	if (po)
+		if (!schedule_work(&po->proto.pppoe.padt_work))
+			sock_put(sk_pppox(po));
+
+abort:
+	kfree_skb(skb);
+out:
+	return NET_RX_SUCCESS; /* Lies... :-) */
+}
+
+static struct packet_type pppoes_ptype __read_mostly = {
+	.type	= cpu_to_be16(ETH_P_PPP_SES),
+	.func	= pppoe_rcv,
+};
+
+static struct packet_type pppoed_ptype __read_mostly = {
+	.type	= cpu_to_be16(ETH_P_PPP_DISC),
+	.func	= pppoe_disc_rcv,
+};
+
+static struct proto pppoe_sk_proto __read_mostly = {
+	.name	  = "PPPOE",
+	.owner	  = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+/***********************************************************************
+ *
+ * Initialize a new struct sock.
+ *
+ **********************************************************************/
+static int pppoe_create(struct net *net, struct socket *sock, int kern)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+
+	sock->state	= SS_UNCONNECTED;
+	sock->ops	= &pppoe_ops;
+
+	sk->sk_backlog_rcv	= pppoe_rcv_core;
+	sk->sk_state		= PPPOX_NONE;
+	sk->sk_type		= SOCK_STREAM;
+	sk->sk_family		= PF_PPPOX;
+	sk->sk_protocol		= PX_PROTO_OE;
+
+	INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
+		  pppoe_unbind_sock_work);
+
+	return 0;
+}
+
+static int pppoe_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po;
+	struct pppoe_net *pn;
+	struct net *net = NULL;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	po = pppox_sk(sk);
+
+	if (po->pppoe_dev) {
+		dev_put(po->pppoe_dev);
+		po->pppoe_dev = NULL;
+	}
+
+	pppox_unbind_sock(sk);
+
+	/* Signal the death of the socket. */
+	sk->sk_state = PPPOX_DEAD;
+
+	net = sock_net(sk);
+	pn = pppoe_pernet(net);
+
+	/*
+	 * protect "po" from concurrent updates
+	 * on pppoe_flush_dev
+	 */
+	delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
+		    po->pppoe_ifindex);
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+
+	skb_queue_purge(&sk->sk_receive_queue);
+	release_sock(sk);
+	sock_put(sk);
+
+	return 0;
+}
+
+static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+		  int sockaddr_len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct net_device *dev = NULL;
+	struct pppoe_net *pn;
+	struct net *net = NULL;
+	int error;
+
+	lock_sock(sk);
+
+	error = -EINVAL;
+
+	if (sockaddr_len != sizeof(struct sockaddr_pppox))
+		goto end;
+
+	if (sp->sa_protocol != PX_PROTO_OE)
+		goto end;
+
+	/* Check for already bound sockets */
+	error = -EBUSY;
+	if ((sk->sk_state & PPPOX_CONNECTED) &&
+	     stage_session(sp->sa_addr.pppoe.sid))
+		goto end;
+
+	/* Check for already disconnected sockets, on attempts to disconnect */
+	error = -EALREADY;
+	if ((sk->sk_state & PPPOX_DEAD) &&
+	     !stage_session(sp->sa_addr.pppoe.sid))
+		goto end;
+
+	error = 0;
+
+	/* Delete the old binding */
+	if (stage_session(po->pppoe_pa.sid)) {
+		pppox_unbind_sock(sk);
+		pn = pppoe_pernet(sock_net(sk));
+		delete_item(pn, po->pppoe_pa.sid,
+			    po->pppoe_pa.remote, po->pppoe_ifindex);
+		if (po->pppoe_dev) {
+			dev_put(po->pppoe_dev);
+			po->pppoe_dev = NULL;
+		}
+
+		po->pppoe_ifindex = 0;
+		memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
+		memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
+		memset(&po->chan, 0, sizeof(po->chan));
+		po->next = NULL;
+		po->num = 0;
+
+		sk->sk_state = PPPOX_NONE;
+	}
+
+	/* Re-bind in session stage only */
+	if (stage_session(sp->sa_addr.pppoe.sid)) {
+		error = -ENODEV;
+		net = sock_net(sk);
+		dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
+		if (!dev)
+			goto err_put;
+
+		po->pppoe_dev = dev;
+		po->pppoe_ifindex = dev->ifindex;
+		pn = pppoe_pernet(net);
+		if (!(dev->flags & IFF_UP)) {
+			goto err_put;
+		}
+
+		memcpy(&po->pppoe_pa,
+		       &sp->sa_addr.pppoe,
+		       sizeof(struct pppoe_addr));
+
+		write_lock_bh(&pn->hash_lock);
+		error = __set_item(pn, po);
+		write_unlock_bh(&pn->hash_lock);
+		if (error < 0)
+			goto err_put;
+
+		po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
+				   dev->hard_header_len);
+
+		po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
+		po->chan.private = sk;
+		po->chan.ops = &pppoe_chan_ops;
+
+		error = ppp_register_net_channel(dev_net(dev), &po->chan);
+		if (error) {
+			delete_item(pn, po->pppoe_pa.sid,
+				    po->pppoe_pa.remote, po->pppoe_ifindex);
+			goto err_put;
+		}
+
+		sk->sk_state = PPPOX_CONNECTED;
+	}
+
+	po->num = sp->sa_addr.pppoe.sid;
+
+end:
+	release_sock(sk);
+	return error;
+err_put:
+	if (po->pppoe_dev) {
+		dev_put(po->pppoe_dev);
+		po->pppoe_dev = NULL;
+	}
+	goto end;
+}
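+
+/* Illustrative userspace sketch of the connect() above (not kernel code;
+ * the interface name and discovery step are assumptions):
+ *
+ *	struct sockaddr_pppox sp = {
+ *		.sa_family   = AF_PPPOX,
+ *		.sa_protocol = PX_PROTO_OE,
+ *	};
+ *	sp.sa_addr.pppoe.sid = sid;	// network order, from the PADS reply
+ *	memcpy(sp.sa_addr.pppoe.remote, ac_mac, ETH_ALEN);
+ *	strcpy(sp.sa_addr.pppoe.dev, "eth0");
+ *	connect(fd, (struct sockaddr *)&sp, sizeof(sp));
+ */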
+
+static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
+		  int peer)
+{
+	int len = sizeof(struct sockaddr_pppox);
+	struct sockaddr_pppox sp;
+
+	sp.sa_family	= AF_PPPOX;
+	sp.sa_protocol	= PX_PROTO_OE;
+	memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa,
+	       sizeof(struct pppoe_addr));
+
+	memcpy(uaddr, &sp, len);
+
+	return len;
+}
+
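+/* PPPoE-specific socket ioctls: query/validate the MRU against the
+ * underlying device MTU, tweak flags, and set up or tear down session
+ * forwarding (relay) to another PPPoE socket.
+ */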
+static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
+		unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	int val;
+	int err;
+
+	switch (cmd) {
+	case PPPIOCGMRU:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (put_user(po->pppoe_dev->mtu -
+			     sizeof(struct pppoe_hdr) -
+			     PPP_HDRLEN,
+			     (int __user *)arg))
+			break;
+		err = 0;
+		break;
+
+	case PPPIOCSMRU:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (get_user(val, (int __user *)arg))
+			break;
+
+		if (val < (po->pppoe_dev->mtu
+			   - sizeof(struct pppoe_hdr)
+			   - PPP_HDRLEN))
+			err = 0;
+		else
+			err = -EINVAL;
+		break;
+
+	case PPPIOCSFLAGS:
+		err = -EFAULT;
+		if (get_user(val, (int __user *)arg))
+			break;
+		err = 0;
+		break;
+
+	case PPPOEIOCSFWD:
+	{
+		struct pppox_sock *relay_po;
+
+		err = -EBUSY;
+		if (sk->sk_state & (PPPOX_BOUND | PPPOX_DEAD))
+			break;
+
+		err = -ENOTCONN;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		/* The PPPoE address from the user specifies an outbound
+		   PPPoE address to which frames are forwarded. */
+		err = -EFAULT;
+		if (copy_from_user(&po->pppoe_relay,
+				   (void __user *)arg,
+				   sizeof(struct sockaddr_pppox)))
+			break;
+
+		err = -EINVAL;
+		if (po->pppoe_relay.sa_family != AF_PPPOX ||
+		    po->pppoe_relay.sa_protocol != PX_PROTO_OE)
+			break;
+
+		/* Check that the socket referenced by the address
+		   actually exists. */
+		relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+		if (!relay_po)
+			break;
+
+		sock_put(sk_pppox(relay_po));
+		sk->sk_state |= PPPOX_RELAY;
+		err = 0;
+		break;
+	}
+
+	case PPPOEIOCDFWD:
+		err = -EALREADY;
+		if (!(sk->sk_state & PPPOX_RELAY))
+			break;
+
+		sk->sk_state &= ~PPPOX_RELAY;
+		err = 0;
+		break;
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
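+/* sendmsg() on a connected (but not channel-bound) socket transmits one
+ * raw PPP frame: the user payload is prefixed with a PPPoE header and the
+ * device's link-layer header, then queued directly on the device.
+ */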
+static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+			 size_t total_len)
+{
+	struct sk_buff *skb;
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	int error;
+	struct pppoe_hdr hdr;
+	struct pppoe_hdr *ph;
+	struct net_device *dev;
+	char *start;
+	int hlen;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
+		error = -ENOTCONN;
+		goto end;
+	}
+
+	hdr.ver = 1;
+	hdr.type = 1;
+	hdr.code = 0;
+	hdr.sid = po->num;
+
+	dev = po->pppoe_dev;
+
+	error = -EMSGSIZE;
+	if (total_len > (dev->mtu + dev->hard_header_len))
+		goto end;
+
+	hlen = LL_RESERVED_SPACE(dev);
+	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+			   dev->needed_tailroom, 0, GFP_KERNEL);
+	if (!skb) {
+		error = -ENOMEM;
+		goto end;
+	}
+
+	/* Reserve space for headers. */
+	skb_reserve(skb, hlen);
+	skb_reset_network_header(skb);
+
+	skb->dev = dev;
+
+	skb->priority = sk->sk_priority;
+	skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
+
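+	/* Reserve the PPPoE header and payload in one skb_put(); the payload
+	 * is copied in first and the header filled in below, once the copy
+	 * from userspace has succeeded.
+	 */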
+	ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr));
+	start = (char *)&ph->tag[0];
+
+	error = memcpy_from_msg(start, m, total_len);
+	if (error < 0) {
+		kfree_skb(skb);
+		goto end;
+	}
+
+	error = total_len;
+	dev_hard_header(skb, dev, ETH_P_PPP_SES,
+			po->pppoe_pa.remote, NULL, total_len);
+
+	memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
+
+	ph->length = htons(total_len);
+
+	dev_queue_xmit(skb);
+
+end:
+	release_sock(sk);
+	return error;
+}
+
+/************************************************************************
+ *
+ * xmit function for internal use.
+ *
+ ***********************************************************************/
+static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
+{
+	struct pppox_sock *po = pppox_sk(sk);
+	struct net_device *dev = po->pppoe_dev;
+	struct pppoe_hdr *ph;
+	int data_len = skb->len;
+
+	/* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
+	 * xmit operations conclude prior to an unregistration call.  Thus
+	 * sk->sk_state cannot change, so we don't need to do lock_sock().
+	 * But, we also can't do a lock_sock since that introduces a potential
+	 * deadlock as we'd reverse the lock ordering used when calling
+	 * ppp_unregister_channel().
+	 */
+
+	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+		goto abort;
+
+	if (!dev)
+		goto abort;
+
+	/* Copy the data if there is no space for the header or if it's
+	 * read-only.
+	 */
+	if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
+		goto abort;
+
+	__skb_push(skb, sizeof(*ph));
+	skb_reset_network_header(skb);
+
+	ph = pppoe_hdr(skb);
+	ph->ver	= 1;
+	ph->type = 1;
+	ph->code = 0;
+	ph->sid	= po->num;
+	ph->length = htons(data_len);
+
+	skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
+	skb->dev = dev;
+
+	dev_hard_header(skb, dev, ETH_P_PPP_SES,
+			po->pppoe_pa.remote, NULL, data_len);
+
+	dev_queue_xmit(skb);
+	return 1;
+
+abort:
+	kfree_skb(skb);
+	return 1;
+}
+
+/************************************************************************
+ *
+ * xmit function called by generic PPP driver
+ * sends PPP frame over PPPoE socket
+ *
+ ***********************************************************************/
+static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)chan->private;
+
+	return __pppoe_xmit(sk, skb);
+}
+
+static const struct ppp_channel_ops pppoe_chan_ops = {
+	.start_xmit = pppoe_xmit,
+};
+
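+/* recvmsg() dequeues one session frame and copies at most the requested
+ * length to userspace; once the socket is bound to a PPP unit
+ * (PPPOX_BOUND), reads are refused with -EIO since traffic then flows
+ * through the PPP channel instead.
+ */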
+static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
+			 size_t total_len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct sk_buff *skb;
+	int error = 0;
+
+	if (sk->sk_state & PPPOX_BOUND) {
+		error = -EIO;
+		goto end;
+	}
+
+	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+				flags & MSG_DONTWAIT, &error);
+	if (error < 0)
+		goto end;
+
+	if (skb) {
+		total_len = min_t(size_t, total_len, skb->len);
+		error = skb_copy_datagram_msg(skb, 0, m, total_len);
+		if (error == 0) {
+			consume_skb(skb);
+			return total_len;
+		}
+	}
+
+	kfree_skb(skb);
+end:
+	return error;
+}
+
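+/* /proc/net/pppoe: one line per session (id, peer MAC, device), walked
+ * under the per-net hash_lock by the seq_file iterator below.
+ */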
+#ifdef CONFIG_PROC_FS
+static int pppoe_seq_show(struct seq_file *seq, void *v)
+{
+	struct pppox_sock *po;
+	char *dev_name;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Id       Address              Device\n");
+		goto out;
+	}
+
+	po = v;
+	dev_name = po->pppoe_pa.dev;
+
+	seq_printf(seq, "%08X %pM %8s\n",
+		po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
+out:
+	return 0;
+}
+
+static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
+{
+	struct pppox_sock *po;
+	int i;
+
+	for (i = 0; i < PPPOE_HASH_SIZE; i++) {
+		po = pn->hash_table[i];
+		while (po) {
+			if (!pos--)
+				goto out;
+			po = po->next;
+		}
+	}
+
+out:
+	return po;
+}
+
+static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(pn->hash_lock)
+{
+	struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
+	loff_t l = *pos;
+
+	read_lock_bh(&pn->hash_lock);
+	return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
+}
+
+static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
+	struct pppox_sock *po;
+
+	++*pos;
+	if (v == SEQ_START_TOKEN) {
+		po = pppoe_get_idx(pn, 0);
+		goto out;
+	}
+	po = v;
+	if (po->next)
+		po = po->next;
+	else {
+		int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+
+		po = NULL;
+		while (++hash < PPPOE_HASH_SIZE) {
+			po = pn->hash_table[hash];
+			if (po)
+				break;
+		}
+	}
+
+out:
+	return po;
+}
+
+static void pppoe_seq_stop(struct seq_file *seq, void *v)
+	__releases(pn->hash_lock)
+{
+	struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
+	read_unlock_bh(&pn->hash_lock);
+}
+
+static const struct seq_operations pppoe_seq_ops = {
+	.start		= pppoe_seq_start,
+	.next		= pppoe_seq_next,
+	.stop		= pppoe_seq_stop,
+	.show		= pppoe_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
+
+static const struct proto_ops pppoe_ops = {
+	.family		= AF_PPPOX,
+	.owner		= THIS_MODULE,
+	.release	= pppoe_release,
+	.bind		= sock_no_bind,
+	.connect	= pppoe_connect,
+	.socketpair	= sock_no_socketpair,
+	.accept		= sock_no_accept,
+	.getname	= pppoe_getname,
+	.poll		= datagram_poll,
+	.listen		= sock_no_listen,
+	.shutdown	= sock_no_shutdown,
+	.setsockopt	= sock_no_setsockopt,
+	.getsockopt	= sock_no_getsockopt,
+	.sendmsg	= pppoe_sendmsg,
+	.recvmsg	= pppoe_recvmsg,
+	.mmap		= sock_no_mmap,
+	.ioctl		= pppox_ioctl,
+};
+
+static const struct pppox_proto pppoe_proto = {
+	.create	= pppoe_create,
+	.ioctl	= pppoe_ioctl,
+	.owner	= THIS_MODULE,
+};
+
+static __net_init int pppoe_init_net(struct net *net)
+{
+	struct pppoe_net *pn = pppoe_pernet(net);
+	struct proc_dir_entry *pde;
+
+	rwlock_init(&pn->hash_lock);
+
+	pde = proc_create_net("pppoe", 0444, net->proc_net,
+			&pppoe_seq_ops, sizeof(struct seq_net_private));
+#ifdef CONFIG_PROC_FS
+	if (!pde)
+		return -ENOMEM;
+#endif
+
+	return 0;
+}
+
+static __net_exit void pppoe_exit_net(struct net *net)
+{
+	remove_proc_entry("pppoe", net->proc_net);
+}
+
+static struct pernet_operations pppoe_net_ops = {
+	.init = pppoe_init_net,
+	.exit = pppoe_exit_net,
+	.id   = &pppoe_net_id,
+	.size = sizeof(struct pppoe_net),
+};
+
+static int __init pppoe_init(void)
+{
+	int err;
+
+	err = register_pernet_device(&pppoe_net_ops);
+	if (err)
+		goto out;
+
+	err = proto_register(&pppoe_sk_proto, 0);
+	if (err)
+		goto out_unregister_net_ops;
+
+	err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
+	if (err)
+		goto out_unregister_pppoe_proto;
+
+	dev_add_pack(&pppoes_ptype);
+	dev_add_pack(&pppoed_ptype);
+	register_netdevice_notifier(&pppoe_notifier);
+
+	return 0;
+
+out_unregister_pppoe_proto:
+	proto_unregister(&pppoe_sk_proto);
+out_unregister_net_ops:
+	unregister_pernet_device(&pppoe_net_ops);
+out:
+	return err;
+}
+
+static void __exit pppoe_exit(void)
+{
+	unregister_netdevice_notifier(&pppoe_notifier);
+	dev_remove_pack(&pppoed_ptype);
+	dev_remove_pack(&pppoes_ptype);
+	unregister_pppox_proto(PX_PROTO_OE);
+	proto_unregister(&pppoe_sk_proto);
+	unregister_pernet_device(&pppoe_net_ops);
+}
+
+module_init(pppoe_init);
+module_exit(pppoe_exit);
+
+MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>");
+MODULE_DESCRIPTION("PPP over Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OE);
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
new file mode 100644
index 0000000..c0599b3
--- /dev/null
+++ b/drivers/net/ppp/pppox.c
@@ -0,0 +1,150 @@
+/** -*- linux-c -*- ***********************************************************
+ * Linux PPP over X/Ethernet (PPPoX/PPPoE) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoE --- PPP over Ethernet (RFC 2516)
+ *
+ *
+ * Version:	0.5.2
+ *
+ * Author:	Michal Ostrowski <mostrows@speakeasy.net>
+ *
+ * 051000 :	Initialization cleanup
+ *
+ * License:
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/init.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/ppp_channel.h>
+#include <linux/kmod.h>
+
+#include <net/sock.h>
+
+#include <linux/uaccess.h>
+
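+/* Table of registered PX_PROTO_* handlers, updated only at protocol
+ * (un)registration time by the protocol modules.
+ */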
+static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
+
+int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
+{
+	if (proto_num < 0 || proto_num > PX_MAX_PROTO)
+		return -EINVAL;
+	if (pppox_protos[proto_num])
+		return -EALREADY;
+	pppox_protos[proto_num] = pp;
+	return 0;
+}
+
+void unregister_pppox_proto(int proto_num)
+{
+	if (proto_num >= 0 && proto_num <= PX_MAX_PROTO)
+		pppox_protos[proto_num] = NULL;
+}
+
+void pppox_unbind_sock(struct sock *sk)
+{
+	/* Clear connection to ppp device, if attached. */
+
+	if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED)) {
+		ppp_unregister_channel(&pppox_sk(sk)->chan);
+		sk->sk_state = PPPOX_DEAD;
+	}
+}
+
+EXPORT_SYMBOL(register_pppox_proto);
+EXPORT_SYMBOL(unregister_pppox_proto);
+EXPORT_SYMBOL(pppox_unbind_sock);
+
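+/* Generic PPPoX ioctl: PPPIOCGCHAN (report the PPP channel index and mark
+ * the socket bound) is handled here; anything else is delegated to the
+ * per-protocol handler, if one exists.
+ */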
+int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	int rc;
+
+	lock_sock(sk);
+
+	switch (cmd) {
+	case PPPIOCGCHAN: {
+		int index;
+		rc = -ENOTCONN;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		rc = -EINVAL;
+		index = ppp_channel_index(&po->chan);
+		if (put_user(index, (int __user *)arg))
+			break;
+
+		rc = 0;
+		sk->sk_state |= PPPOX_BOUND;
+		break;
+	}
+	default:
+		rc = pppox_protos[sk->sk_protocol]->ioctl ?
+			pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY;
+	}
+
+	release_sock(sk);
+	return rc;
+}
+
+EXPORT_SYMBOL(pppox_ioctl);
+
+static int pppox_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
+{
+	int rc = -EPROTOTYPE;
+
+	if (protocol < 0 || protocol > PX_MAX_PROTO)
+		goto out;
+
+	rc = -EPROTONOSUPPORT;
+	if (!pppox_protos[protocol])
+		request_module("net-pf-%d-proto-%d", PF_PPPOX, protocol);
+	if (!pppox_protos[protocol] ||
+	    !try_module_get(pppox_protos[protocol]->owner))
+		goto out;
+
+	rc = pppox_protos[protocol]->create(net, sock, kern);
+
+	module_put(pppox_protos[protocol]->owner);
+out:
+	return rc;
+}
+
+static const struct net_proto_family pppox_proto_family = {
+	.family	= PF_PPPOX,
+	.create	= pppox_create,
+	.owner	= THIS_MODULE,
+};
+
+static int __init pppox_init(void)
+{
+	return sock_register(&pppox_proto_family);
+}
+
+static void __exit pppox_exit(void)
+{
+	sock_unregister(PF_PPPOX);
+}
+
+module_init(pppox_init);
+module_exit(pppox_exit);
+
+MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>");
+MODULE_DESCRIPTION("PPP over Ethernet driver (generic socket layer)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_PPPOX);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
new file mode 100644
index 0000000..67ffe74
--- /dev/null
+++ b/drivers/net/ppp/pptp.c
@@ -0,0 +1,699 @@
+/*
+ *  Point-to-Point Tunneling Protocol for Linux
+ *
+ *	Authors: Dmitry Kozlov <xeb@mail.ru>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/notifier.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/gre.h>
+#include <net/pptp.h>
+
+#include <linux/uaccess.h>
+
+#define PPTP_DRIVER_VERSION "0.8.5"
+
+#define MAX_CALLID 65535
+
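+/* Sessions are demultiplexed on the local call ID: callid_sock[] maps a
+ * call ID to its socket and callid_bitmap tracks the IDs in use.  Writers
+ * take chan_lock; the receive path looks up under RCU.
+ */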
+static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
+static struct pppox_sock __rcu **callid_sock;
+
+static DEFINE_SPINLOCK(chan_lock);
+
+static struct proto pptp_sk_proto __read_mostly;
+static const struct ppp_channel_ops pptp_chan_ops;
+static const struct proto_ops pptp_ops;
+
+static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
+{
+	struct pppox_sock *sock;
+	struct pptp_opt *opt;
+
+	rcu_read_lock();
+	sock = rcu_dereference(callid_sock[call_id]);
+	if (sock) {
+		opt = &sock->proto.pptp;
+		if (opt->dst_addr.sin_addr.s_addr != s_addr)
+			sock = NULL;
+		else
+			sock_hold(sk_pppox(sock));
+	}
+	rcu_read_unlock();
+
+	return sock;
+}
+
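+/* Return true if some session already targets <call_id, d_addr> on the
+ * peer side; connect() uses this to reject duplicate destinations.
+ */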
+static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+{
+	struct pppox_sock *sock;
+	struct pptp_opt *opt;
+	int i;
+
+	rcu_read_lock();
+	i = 1;
+	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+		sock = rcu_dereference(callid_sock[i]);
+		if (!sock)
+			continue;
+		opt = &sock->proto.pptp;
+		if (opt->dst_addr.call_id == call_id &&
+			  opt->dst_addr.sin_addr.s_addr == d_addr)
+			break;
+	}
+	rcu_read_unlock();
+
+	return i < MAX_CALLID;
+}
+
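+/* Claim a local call ID for a session.  A zero call_id asks the kernel to
+ * allocate one, scanning the bitmap from the previous allocation point and
+ * wrapping around once before giving up.
+ */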
+static int add_chan(struct pppox_sock *sock,
+		    struct pptp_addr *sa)
+{
+	static int call_id;
+
+	spin_lock(&chan_lock);
+	if (!sa->call_id) {
+		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+		if (call_id == MAX_CALLID) {
+			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+			if (call_id == MAX_CALLID)
+				goto out_err;
+		}
+		sa->call_id = call_id;
+	} else if (test_bit(sa->call_id, callid_bitmap)) {
+		goto out_err;
+	}
+
+	sock->proto.pptp.src_addr = *sa;
+	set_bit(sa->call_id, callid_bitmap);
+	rcu_assign_pointer(callid_sock[sa->call_id], sock);
+	spin_unlock(&chan_lock);
+
+	return 0;
+
+out_err:
+	spin_unlock(&chan_lock);
+	return -1;
+}
+
+static void del_chan(struct pppox_sock *sock)
+{
+	spin_lock(&chan_lock);
+	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+	spin_unlock(&chan_lock);
+}
+
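+/* Transmit one PPP frame: route to the peer, restore the address/control
+ * bytes where required, then prepend a PPTP GRE header (piggybacking an
+ * ack when one is outstanding) and an IPv4 header before handing the
+ * packet to ip_local_out().
+ */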
+static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *) chan->private;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct net *net = sock_net(sk);
+	struct pptp_opt *opt = &po->proto.pptp;
+	struct pptp_gre_header *hdr;
+	unsigned int header_len = sizeof(*hdr);
+	struct flowi4 fl4;
+	int islcp;
+	int len;
+	unsigned char *data;
+	__u32 seq_recv;
+
+	struct rtable *rt;
+	struct net_device *tdev;
+	struct iphdr  *iph;
+	int    max_headroom;
+
+	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+		goto tx_error;
+
+	rt = ip_route_output_ports(net, &fl4, NULL,
+				   opt->dst_addr.sin_addr.s_addr,
+				   opt->src_addr.sin_addr.s_addr,
+				   0, 0, IPPROTO_GRE,
+				   RT_TOS(0), 0);
+	if (IS_ERR(rt))
+		goto tx_error;
+
+	tdev = rt->dst.dev;
+
+	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
+
+	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+		if (!new_skb) {
+			ip_rt_put(rt);
+			goto tx_error;
+		}
+		if (skb->sk)
+			skb_set_owner_w(new_skb, skb->sk);
+		consume_skb(skb);
+		skb = new_skb;
+	}
+
+	data = skb->data;
+	islcp = ((data[0] << 8) + data[1]) == PPP_LCP &&
+		1 <= data[2] && data[2] <= 7;
+
+	/* compress protocol field */
+	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
+		skb_pull(skb, 1);
+
+	/* Put in the address/control bytes if necessary */
+	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
+		data = skb_push(skb, 2);
+		data[0] = PPP_ALLSTATIONS;
+		data[1] = PPP_UI;
+	}
+
+	len = skb->len;
+
+	seq_recv = opt->seq_recv;
+
+	if (opt->ack_sent == seq_recv)
+		header_len -= sizeof(hdr->ack);
+
+	/* Push down and install GRE header */
+	skb_push(skb, header_len);
+	hdr = (struct pptp_gre_header *)(skb->data);
+
+	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
+	hdr->gre_hd.protocol = GRE_PROTO_PPP;
+	hdr->call_id = htons(opt->dst_addr.call_id);
+
+	hdr->seq = htonl(++opt->seq_sent);
+	if (opt->ack_sent != seq_recv) {
+		/* send ack with this message */
+		hdr->gre_hd.flags |= GRE_ACK;
+		hdr->ack  = htonl(seq_recv);
+		opt->ack_sent = seq_recv;
+	}
+	hdr->payload_len = htons(len);
+
+	/* Push down and install the IP header. */
+
+	skb_reset_transport_header(skb);
+	skb_push(skb, sizeof(*iph));
+	skb_reset_network_header(skb);
+	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+
+	iph           = ip_hdr(skb);
+	iph->version  = 4;
+	iph->ihl      = sizeof(struct iphdr) >> 2;
+	if (ip_dont_fragment(sk, &rt->dst))
+		iph->frag_off = htons(IP_DF);
+	else
+		iph->frag_off = 0;
+	iph->protocol = IPPROTO_GRE;
+	iph->tos      = 0;
+	iph->daddr    = fl4.daddr;
+	iph->saddr    = fl4.saddr;
+	iph->ttl      = ip4_dst_hoplimit(&rt->dst);
+	iph->tot_len  = htons(skb->len);
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->dst);
+
+	nf_reset(skb);
+
+	skb->ip_summed = CHECKSUM_NONE;
+	ip_select_ident(net, skb, NULL);
+	ip_send_check(iph);
+
+	ip_local_out(net, skb->sk, skb);
+	return 1;
+
+tx_error:
+	kfree_skb(skb);
+	return 1;
+}
+
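+/* Per-socket receive path: update the peer's ack state, enforce in-order
+ * sequence numbers (out-of-order LCP echo request/reply frames are still
+ * let through), strip the GRE encapsulation and feed the PPP payload to
+ * ppp_input().
+ */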
+static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
+{
+	struct pppox_sock *po = pppox_sk(sk);
+	struct pptp_opt *opt = &po->proto.pptp;
+	int headersize, payload_len, seq;
+	__u8 *payload;
+	struct pptp_gre_header *header;
+
+	if (!(sk->sk_state & PPPOX_CONNECTED)) {
+		if (sock_queue_rcv_skb(sk, skb))
+			goto drop;
+		return NET_RX_SUCCESS;
+	}
+
+	header = (struct pptp_gre_header *)(skb->data);
+	headersize  = sizeof(*header);
+
+	/* test if acknowledgement present */
+	if (GRE_IS_ACK(header->gre_hd.flags)) {
+		__u32 ack;
+
+		if (!pskb_may_pull(skb, headersize))
+			goto drop;
+		header = (struct pptp_gre_header *)(skb->data);
+
+		/* ack in different place if S = 0 */
+		ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;
+
+		ack = ntohl(ack);
+
+		if (ack > opt->ack_recv)
+			opt->ack_recv = ack;
+		/* also handle sequence number wrap-around */
+		if (WRAPPED(ack, opt->ack_recv))
+			opt->ack_recv = ack;
+	} else {
+		headersize -= sizeof(header->ack);
+	}
+	/* test if payload present */
+	if (!GRE_IS_SEQ(header->gre_hd.flags))
+		goto drop;
+
+	payload_len = ntohs(header->payload_len);
+	seq         = ntohl(header->seq);
+
+	/* check for incomplete packet (length smaller than expected) */
+	if (!pskb_may_pull(skb, headersize + payload_len))
+		goto drop;
+
+	payload = skb->data + headersize;
+	/* check for expected sequence number */
+	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
+		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
+				(PPP_PROTOCOL(payload) == PPP_LCP) &&
+				((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
+			goto allow_packet;
+	} else {
+		opt->seq_recv = seq;
+allow_packet:
+		skb_pull(skb, headersize);
+
+		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
+			/* chop off address/control */
+			if (skb->len < 3)
+				goto drop;
+			skb_pull(skb, 2);
+		}
+
+		if ((*skb->data) & 1) {
+			/* protocol is compressed */
+			*(u8 *)skb_push(skb, 1) = 0;
+		}
+
+		skb->ip_summed = CHECKSUM_NONE;
+		skb_set_network_header(skb, skb->head - skb->data);
+		ppp_input(&po->chan, skb);
+
+		return NET_RX_SUCCESS;
+	}
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
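+/* GRE protocol handler: sanity-check the PPTP flavour of the GRE header
+ * and pass the skb to the socket owning the call ID, if any.
+ */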
+static int pptp_rcv(struct sk_buff *skb)
+{
+	struct pppox_sock *po;
+	struct pptp_gre_header *header;
+	struct iphdr *iph;
+
+	if (skb->pkt_type != PACKET_HOST)
+		goto drop;
+
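+	/* 12 bytes = GRE base header + payload_len + call_id + seq, no ack */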
+	if (!pskb_may_pull(skb, 12))
+		goto drop;
+
+	iph = ip_hdr(skb);
+
+	header = (struct pptp_gre_header *)skb->data;
+
+	if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
+		GRE_IS_CSUM(header->gre_hd.flags) ||    /* flag CSUM should be clear */
+		GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
+		!GRE_IS_KEY(header->gre_hd.flags) ||    /* flag KEY should be set */
+		(header->gre_hd.flags & GRE_FLAGS))     /* flag Recursion Ctrl should be clear */
+		/* if invalid, discard this packet */
+		goto drop;
+
+	po = lookup_chan(htons(header->call_id), iph->saddr);
+	if (po) {
+		skb_dst_drop(skb);
+		nf_reset(skb);
+		return sk_receive_skb(sk_pppox(po), skb, 0);
+	}
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
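+/* bind() claims the local call ID (see add_chan()); connect() below then
+ * fixes the peer address and registers the PPP channel.  An illustrative
+ * userspace sketch (not kernel code; values are examples):
+ *
+ *	struct sockaddr_pppox sp = {
+ *		.sa_family   = AF_PPPOX,
+ *		.sa_protocol = PX_PROTO_PPTP,
+ *	};
+ *	sp.sa_addr.pptp.call_id = 0;	// 0: let the kernel pick an ID
+ *	sp.sa_addr.pptp.sin_addr.s_addr = local_ip;
+ *	bind(fd, (struct sockaddr *)&sp, sizeof(sp));
+ */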
+static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+	int sockaddr_len)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+	struct pppox_sock *po = pppox_sk(sk);
+	int error = 0;
+
+	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+		return -EINVAL;
+
+	lock_sock(sk);
+
+	if (sk->sk_state & PPPOX_DEAD) {
+		error = -EALREADY;
+		goto out;
+	}
+
+	if (sk->sk_state & PPPOX_BOUND) {
+		error = -EBUSY;
+		goto out;
+	}
+
+	if (add_chan(po, &sp->sa_addr.pptp))
+		error = -EBUSY;
+	else
+		sk->sk_state |= PPPOX_BOUND;
+
+out:
+	release_sock(sk);
+	return error;
+}
+
+static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+	int sockaddr_len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct pptp_opt *opt = &po->proto.pptp;
+	struct rtable *rt;
+	struct flowi4 fl4;
+	int error = 0;
+
+	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+		return -EINVAL;
+
+	if (sp->sa_protocol != PX_PROTO_PPTP)
+		return -EINVAL;
+
+	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
+		return -EALREADY;
+
+	lock_sock(sk);
+	/* Check for already bound sockets */
+	if (sk->sk_state & PPPOX_CONNECTED) {
+		error = -EBUSY;
+		goto end;
+	}
+
+	/* Check for already disconnected sockets on attempts to disconnect */
+	if (sk->sk_state & PPPOX_DEAD) {
+		error = -EALREADY;
+		goto end;
+	}
+
+	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
+		error = -EINVAL;
+		goto end;
+	}
+
+	po->chan.private = sk;
+	po->chan.ops = &pptp_chan_ops;
+
+	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
+				   opt->dst_addr.sin_addr.s_addr,
+				   opt->src_addr.sin_addr.s_addr,
+				   0, 0,
+				   IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+	if (IS_ERR(rt)) {
+		error = -EHOSTUNREACH;
+		goto end;
+	}
+	sk_setup_caps(sk, &rt->dst);
+
+	po->chan.mtu = dst_mtu(&rt->dst);
+	if (!po->chan.mtu)
+		po->chan.mtu = PPP_MRU;
+	po->chan.mtu -= PPTP_HEADER_OVERHEAD;
+
+	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+	error = ppp_register_channel(&po->chan);
+	if (error) {
+		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
+		goto end;
+	}
+
+	opt->dst_addr = sp->sa_addr.pptp;
+	sk->sk_state |= PPPOX_CONNECTED;
+
+ end:
+	release_sock(sk);
+	return error;
+}
+
+static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
+	int peer)
+{
+	int len = sizeof(struct sockaddr_pppox);
+	struct sockaddr_pppox sp;
+
+	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
+
+	sp.sa_family    = AF_PPPOX;
+	sp.sa_protocol  = PX_PROTO_PPTP;
+	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
+
+	memcpy(uaddr, &sp, len);
+
+	return len;
+}
+
+static int pptp_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po;
+	int error = 0;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	po = pppox_sk(sk);
+	del_chan(po);
+	synchronize_rcu();
+
+	pppox_unbind_sock(sk);
+	sk->sk_state = PPPOX_DEAD;
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+
+	release_sock(sk);
+	sock_put(sk);
+
+	return error;
+}
+
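+/* Last-reference destructor: mirror pptp_release() for sockets destroyed
+ * without an explicit release, then purge anything left on the receive
+ * queue.
+ */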
+static void pptp_sock_destruct(struct sock *sk)
+{
+	if (!(sk->sk_state & PPPOX_DEAD)) {
+		del_chan(pppox_sk(sk));
+		pppox_unbind_sock(sk);
+	}
+	skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static int pptp_create(struct net *net, struct socket *sock, int kern)
+{
+	int error = -ENOMEM;
+	struct sock *sk;
+	struct pppox_sock *po;
+	struct pptp_opt *opt;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
+	if (!sk)
+		goto out;
+
+	sock_init_data(sock, sk);
+
+	sock->state = SS_UNCONNECTED;
+	sock->ops   = &pptp_ops;
+
+	sk->sk_backlog_rcv = pptp_rcv_core;
+	sk->sk_state       = PPPOX_NONE;
+	sk->sk_type        = SOCK_STREAM;
+	sk->sk_family      = PF_PPPOX;
+	sk->sk_protocol    = PX_PROTO_PPTP;
+	sk->sk_destruct    = pptp_sock_destruct;
+
+	po = pppox_sk(sk);
+	opt = &po->proto.pptp;
+
+	opt->seq_sent = 0;
+	opt->seq_recv = 0xffffffff;
+	opt->ack_recv = 0;
+	opt->ack_sent = 0xffffffff;
+
+	error = 0;
+out:
+	return error;
+}
+
+static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+	unsigned long arg)
+{
+	struct sock *sk = (struct sock *) chan->private;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct pptp_opt *opt = &po->proto.pptp;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int err, val;
+
+	err = -EFAULT;
+	switch (cmd) {
+	case PPPIOCGFLAGS:
+		val = opt->ppp_flags;
+		if (put_user(val, p))
+			break;
+		err = 0;
+		break;
+	case PPPIOCSFLAGS:
+		if (get_user(val, p))
+			break;
+		opt->ppp_flags = val & ~SC_RCV_BITS;
+		err = 0;
+		break;
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+static const struct ppp_channel_ops pptp_chan_ops = {
+	.start_xmit = pptp_xmit,
+	.ioctl      = pptp_ppp_ioctl,
+};
+
+static struct proto pptp_sk_proto __read_mostly = {
+	.name     = "PPTP",
+	.owner    = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static const struct proto_ops pptp_ops = {
+	.family     = AF_PPPOX,
+	.owner      = THIS_MODULE,
+	.release    = pptp_release,
+	.bind       = pptp_bind,
+	.connect    = pptp_connect,
+	.socketpair = sock_no_socketpair,
+	.accept     = sock_no_accept,
+	.getname    = pptp_getname,
+	.listen     = sock_no_listen,
+	.shutdown   = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg    = sock_no_sendmsg,
+	.recvmsg    = sock_no_recvmsg,
+	.mmap       = sock_no_mmap,
+	.ioctl      = pppox_ioctl,
+};
+
+static const struct pppox_proto pppox_pptp_proto = {
+	.create = pptp_create,
+	.owner  = THIS_MODULE,
+};
+
+static const struct gre_protocol gre_pptp_protocol = {
+	.handler = pptp_rcv,
+};
+
+static int __init pptp_init_module(void)
+{
+	int err = 0;
+
+	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
+
+	callid_sock = vzalloc(array_size(sizeof(void *), (MAX_CALLID + 1)));
+	if (!callid_sock)
+		return -ENOMEM;
+
+	err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+	if (err) {
+		pr_err("PPTP: can't add gre protocol\n");
+		goto out_mem_free;
+	}
+
+	err = proto_register(&pptp_sk_proto, 0);
+	if (err) {
+		pr_err("PPTP: can't register sk_proto\n");
+		goto out_gre_del_protocol;
+	}
+
+	err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
+	if (err) {
+		pr_err("PPTP: can't register pppox_proto\n");
+		goto out_unregister_sk_proto;
+	}
+
+	return 0;
+
+out_unregister_sk_proto:
+	proto_unregister(&pptp_sk_proto);
+out_gre_del_protocol:
+	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+out_mem_free:
+	vfree(callid_sock);
+
+	return err;
+}
+
+static void __exit pptp_exit_module(void)
+{
+	unregister_pppox_proto(PX_PROTO_PPTP);
+	proto_unregister(&pptp_sk_proto);
+	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+	vfree(callid_sock);
+}
+
+module_init(pptp_init_module);
+module_exit(pptp_exit_module);
+
+MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);