v4.19.13 snapshot.
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 0000000..f81df91
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,56 @@
+#
+# CAIF physical drivers
+#
+
+comment "CAIF transport drivers"
+
+config CAIF_TTY
+	tristate "CAIF TTY transport driver"
+	depends on CAIF && TTY
+	default n
+	---help---
+	The CAIF TTY transport driver is a Line Discipline (ldisc)
+	identified as N_CAIF. When this ldisc is opened from user space
+	it will redirect the TTY's traffic into the CAIF stack.
+
+config CAIF_SPI_SLAVE
+	tristate "CAIF SPI transport driver for slave interface"
+	depends on CAIF && HAS_DMA
+	default n
+	---help---
+	The CAIF Link layer SPI Protocol driver for Slave SPI interface.
+	This driver implements a platform driver to accommodate a
+	platform-specific SPI device. A sample CAIF SPI Platform device is
+	provided in Documentation/networking/caif/spi_porting.txt
+
+config CAIF_SPI_SYNC
+	bool "Next command and length in start of frame"
+	depends on CAIF_SPI_SLAVE
+	default n
+	---help---
+	Putting the next command and length in the start of the frame can
+	help to synchronize to the next transfer in case of over- or under-runs.
+	This option also needs to be enabled on the modem.
+
+config CAIF_HSI
+	tristate "CAIF HSI transport driver"
+	depends on CAIF
+	default n
+	---help---
+	The CAIF low-level driver for CAIF over HSI.
+	Be aware that if you enable this then you also need to
+	enable a low-level HSI driver.
+
+config CAIF_VIRTIO
+	tristate "CAIF virtio transport driver"
+	depends on CAIF && HAS_DMA
+	select VHOST_RING
+	select VIRTIO
+	select GENERIC_ALLOCATOR
+	default n
+	---help---
+	The CAIF driver for CAIF over Virtio.
+
+if CAIF_VIRTIO
+source "drivers/vhost/Kconfig.vringh"
+endif
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 0000000..54ae116
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
+
+# Serial interface
+obj-$(CONFIG_CAIF_TTY) += caif_serial.o
+
+# SPI slave physical interfaces module
+cfspi_slave-objs := caif_spi.o caif_spi_slave.o
+obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# HSI interface
+obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
new file mode 100644
index 0000000..433a14b
--- /dev/null
+++ b/drivers/net/caif/caif_hsi.c
@@ -0,0 +1,1469 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:  Daniel Martensson
+ *	    Dmitry Tarnyagin / dmitry.tarnyagin@lockless.no
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/if_arp.h>
+#include <linux/timer.h>
+#include <net/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_hsi.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson");
+MODULE_DESCRIPTION("CAIF HSI driver");
+
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
+				(((pow)-((x)&((pow)-1)))))
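+/*
+ * E.g. PAD_POW2(5, 4) == 3 (three bytes pad 5 up to the next multiple
+ * of 4) and PAD_POW2(8, 4) == 0.
+ */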
+
+static const struct cfhsi_config  hsi_default_config = {
+
+	/* Inactivity timeout on HSI, ms */
+	.inactivity_timeout = HZ,
+
+	/* An aggregation timeout (ms) of zero means no aggregation is done */
+	.aggregation_timeout = 1,
+
+	/*
+	 * HSI link layer flow-control thresholds.
+	 * Threshold values for the HSI packet queue. Flow-control will be
+	 * asserted when the number of packets exceeds q_high_mark. It will
+	 * not be de-asserted before the number of packets drops below
+	 * q_low_mark.
+	 * Warning: A high threshold value might increase throughput but it
+	 * will at the same time prevent channel prioritization and increase
+	 * the risk of flooding the modem. The high threshold should be above
+	 * the low.
+	 */
+	.q_high_mark = 100,
+	.q_low_mark = 50,
+
+	/*
+	 * HSI padding options.
+	 * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
+	 */
+	.head_align = 4,
+	.tail_align = 4,
+};
+
+#define ON 1
+#define OFF 0
+
+static LIST_HEAD(cfhsi_list);
+
+static void cfhsi_inactivity_tout(struct timer_list *t)
+{
+	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
+
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	/* Schedule power down work queue. */
+	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
+}
+
+static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
+					   const struct sk_buff *skb,
+					   int direction)
+{
+	struct caif_payload_info *info;
+	int hpad, tpad, len;
+
+	info = (struct caif_payload_info *)&skb->cb;
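+	/*
+	 * The first byte of every frame encodes the head-padding length,
+	 * hence the extra 1 accounted for before aligning the header.
+	 */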
+	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
+	len = skb->len + hpad + tpad;
+
+	if (direction > 0)
+		cfhsi->aggregation_len += len;
+	else if (direction < 0)
+		cfhsi->aggregation_len -= len;
+}
+
+static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
+{
+	int i;
+
+	if (cfhsi->cfg.aggregation_timeout == 0)
+		return true;
+
+	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
+		if (cfhsi->qhead[i].qlen)
+			return true;
+	}
+
+	/* TODO: Use aggregation_len instead */
+	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
+		return true;
+
+	return false;
+}
+
+static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
+{
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
+		skb = skb_dequeue(&cfhsi->qhead[i]);
+		if (skb)
+			break;
+	}
+
+	return skb;
+}
+
+static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
+{
+	int i, len = 0;
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
+		len += skb_queue_len(&cfhsi->qhead[i]);
+	return len;
+}
+
+static void cfhsi_abort_tx(struct cfhsi *cfhsi)
+{
+	struct sk_buff *skb;
+
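+	/*
+	 * Note: the loop below intentionally breaks with cfhsi->lock held;
+	 * the lock also protects the tx_state update and timer rearm that
+	 * follow it.
+	 */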
+	for (;;) {
+		spin_lock_bh(&cfhsi->lock);
+		skb = cfhsi_dequeue(cfhsi);
+		if (!skb)
+			break;
+
+		cfhsi->ndev->stats.tx_errors++;
+		cfhsi->ndev->stats.tx_dropped++;
+		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+		spin_unlock_bh(&cfhsi->lock);
+		kfree_skb(skb);
+	}
+	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		mod_timer(&cfhsi->inactivity_timer,
+			jiffies + cfhsi->cfg.inactivity_timeout);
+	spin_unlock_bh(&cfhsi->lock);
+}
+
+static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
+{
+	char buffer[32]; /* Any reasonable value */
+	size_t fifo_occupancy;
+	int ret;
+
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	do {
+		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
+				&fifo_occupancy);
+		if (ret) {
+			netdev_warn(cfhsi->ndev,
+				"%s: can't get FIFO occupancy: %d.\n",
+				__func__, ret);
+			break;
+		} else if (!fifo_occupancy)
+			/* No more data, exiting normally */
+			break;
+
+		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
+		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
+		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
+				cfhsi->ops);
+		if (ret) {
+			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
+			netdev_warn(cfhsi->ndev,
+				"%s: can't read data: %d.\n",
+				__func__, ret);
+			break;
+		}
+
+		ret = 5 * HZ;
+		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
+			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
+
+		if (ret < 0) {
+			netdev_warn(cfhsi->ndev,
+				"%s: can't wait for flush complete: %d.\n",
+				__func__, ret);
+			break;
+		} else if (!ret) {
+			ret = -ETIMEDOUT;
+			netdev_warn(cfhsi->ndev,
+				"%s: timeout waiting for flush complete.\n",
+				__func__);
+			break;
+		}
+	} while (1);
+
+	return ret;
+}
+
+static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+	int nfrms = 0;
+	int pld_len = 0;
+	struct sk_buff *skb;
+	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
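+	/* pfrm points just past the embedded-frame area, i.e. at the start
+	 * of the payload region of the descriptor. */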
+
+	skb = cfhsi_dequeue(cfhsi);
+	if (!skb)
+		return 0;
+
+	/* Clear offset. */
+	desc->offset = 0;
+
+	/* Check if we can embed a CAIF frame. */
+	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
+		struct caif_payload_info *info;
+		int hpad;
+		int tpad;
+
+		/* Calculate needed head alignment and tail alignment. */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
+
+		/* Check if frame still fits with added alignment. */
+		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
+			u8 *pemb = desc->emb_frm;
+			desc->offset = CFHSI_DESC_SHORT_SZ;
+			*pemb = (u8)(hpad - 1);
+			pemb += hpad;
+
+			/* Update network statistics. */
+			spin_lock_bh(&cfhsi->lock);
+			cfhsi->ndev->stats.tx_packets++;
+			cfhsi->ndev->stats.tx_bytes += skb->len;
+			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+			spin_unlock_bh(&cfhsi->lock);
+
+			/* Copy in embedded CAIF frame. */
+			skb_copy_bits(skb, 0, pemb, skb->len);
+
+			/* Consume the SKB */
+			consume_skb(skb);
+			skb = NULL;
+		}
+	}
+
+	/* Create payload CAIF frames. */
+	while (nfrms < CFHSI_MAX_PKTS) {
+		struct caif_payload_info *info;
+		int hpad;
+		int tpad;
+
+		if (!skb)
+			skb = cfhsi_dequeue(cfhsi);
+
+		if (!skb)
+			break;
+
+		/* Calculate needed head alignment and tail alignment. */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
+
+		/* Fill in CAIF frame length in descriptor. */
+		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
+
+		/* Fill head padding information. */
+		*pfrm = (u8)(hpad - 1);
+		pfrm += hpad;
+
+		/* Update network statistics. */
+		spin_lock_bh(&cfhsi->lock);
+		cfhsi->ndev->stats.tx_packets++;
+		cfhsi->ndev->stats.tx_bytes += skb->len;
+		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+		spin_unlock_bh(&cfhsi->lock);
+
+		/* Copy in CAIF frame. */
+		skb_copy_bits(skb, 0, pfrm, skb->len);
+
+		/* Update payload length. */
+		pld_len += desc->cffrm_len[nfrms];
+
+		/* Update frame pointer. */
+		pfrm += skb->len + tpad;
+
+		/* Consume the SKB */
+		consume_skb(skb);
+		skb = NULL;
+
+		/* Update number of frames. */
+		nfrms++;
+	}
+
+	/* Unused length fields should be zero-filled (according to SPEC). */
+	while (nfrms < CFHSI_MAX_PKTS) {
+		desc->cffrm_len[nfrms] = 0x0000;
+		nfrms++;
+	}
+
+	/* Check if we can piggy-back another descriptor. */
+	if (cfhsi_can_send_aggregate(cfhsi))
+		desc->header |= CFHSI_PIGGY_DESC;
+	else
+		desc->header &= ~CFHSI_PIGGY_DESC;
+
+	return CFHSI_DESC_SZ + pld_len;
+}
+
+static void cfhsi_start_tx(struct cfhsi *cfhsi)
+{
+	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+	int len, res;
+
+	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	do {
+		/* Create HSI frame. */
+		len = cfhsi_tx_frm(desc, cfhsi);
+		if (!len) {
+			spin_lock_bh(&cfhsi->lock);
+			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
+				spin_unlock_bh(&cfhsi->lock);
+				res = -EAGAIN;
+				continue;
+			}
+			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+			/* Start inactivity timer. */
+			mod_timer(&cfhsi->inactivity_timer,
+				jiffies + cfhsi->cfg.inactivity_timeout);
+			spin_unlock_bh(&cfhsi->lock);
+			break;
+		}
+
+		/* Set up new transfer. */
+		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
+		if (WARN_ON(res < 0))
+			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
+				__func__, res);
+	} while (res < 0);
+}
+
+static void cfhsi_tx_done(struct cfhsi *cfhsi)
+{
+	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/*
+	 * Send flow on if flow off has been previously signalled
+	 * and number of packets is below low water mark.
+	 */
+	spin_lock_bh(&cfhsi->lock);
+	if (cfhsi->flow_off_sent &&
+			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
+			cfhsi->cfdev.flowctrl) {
+
+		cfhsi->flow_off_sent = 0;
+		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
+	}
+
+	if (cfhsi_can_send_aggregate(cfhsi)) {
+		spin_unlock_bh(&cfhsi->lock);
+		cfhsi_start_tx(cfhsi);
+	} else {
+		mod_timer(&cfhsi->aggregation_timer,
+			jiffies + cfhsi->cfg.aggregation_timeout);
+		spin_unlock_bh(&cfhsi->lock);
+	}
+}
+
+static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
+{
+	struct cfhsi *cfhsi;
+
+	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+	cfhsi_tx_done(cfhsi);
+}
+
+static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+	int xfer_sz = 0;
+	int nfrms = 0;
+	u16 *plen = NULL;
+	u8 *pfrm = NULL;
+
+	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
+			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
+		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
+			__func__);
+		return -EPROTO;
+	}
+
+	/* Check for embedded CAIF frame. */
+	if (desc->offset) {
+		struct sk_buff *skb;
+		int len = 0;
+		pfrm = ((u8 *)desc) + desc->offset;
+
+		/* Remove offset padding. */
+		pfrm += *pfrm + 1;
+
+		/* Read length of CAIF frame (little endian). */
+		len = *pfrm;
+		len |= ((*(pfrm+1)) << 8) & 0xFF00;
+		len += 2;	/* Add FCS fields. */
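+		/* (Open-coded little-endian read; get_unaligned_le16() is
+		 * the usual helper for this.) */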
+
+		/* Sanity check length of CAIF frame. */
+		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
+			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
+				__func__);
+			return -EPROTO;
+		}
+
+		/* Allocate SKB (OK even in IRQ context). */
+		skb = alloc_skb(len + 1, GFP_ATOMIC);
+		if (!skb) {
+			netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
+				__func__);
+			return -ENOMEM;
+		}
+		caif_assert(skb != NULL);
+
+		skb_put_data(skb, pfrm, len);
+
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfhsi->ndev;
+
+		/*
+		 * We are in a callback handler and
+		 * unfortunately we don't know what context we're
+		 * running in.
+		 */
+		if (in_interrupt())
+			netif_rx(skb);
+		else
+			netif_rx_ni(skb);
+
+		/* Update network statistics. */
+		cfhsi->ndev->stats.rx_packets++;
+		cfhsi->ndev->stats.rx_bytes += len;
+	}
+
+	/* Calculate transfer length. */
+	plen = desc->cffrm_len;
+	while (nfrms < CFHSI_MAX_PKTS && *plen) {
+		xfer_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	/* Check for piggy-backed descriptor. */
+	if (desc->header & CFHSI_PIGGY_DESC)
+		xfer_sz += CFHSI_DESC_SZ;
+
+	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
+		netdev_err(cfhsi->ndev,
+				"%s: Invalid payload len: %d, ignored.\n",
+			__func__, xfer_sz);
+		return -EPROTO;
+	}
+	return xfer_sz;
+}
+
+static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
+{
+	int xfer_sz = 0;
+	int nfrms = 0;
+	u16 *plen;
+
+	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
+			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
+
+		pr_err("Invalid descriptor. %x %x\n", desc->header,
+				desc->offset);
+		return -EPROTO;
+	}
+
+	/* Calculate transfer length. */
+	plen = desc->cffrm_len;
+	while (nfrms < CFHSI_MAX_PKTS && *plen) {
+		xfer_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	if (xfer_sz % 4) {
+		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
+		return -EPROTO;
+	}
+	return xfer_sz;
+}
+
+static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+	int rx_sz = 0;
+	int nfrms = 0;
+	u16 *plen = NULL;
+	u8 *pfrm = NULL;
+
+	/* Sanity check header and offset. */
+	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
+			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
+		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
+			__func__);
+		return -EPROTO;
+	}
+
+	/* Set frame pointer to start of payload. */
+	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+	plen = desc->cffrm_len;
+
+	/* Skip already processed frames. */
+	while (nfrms < cfhsi->rx_state.nfrms) {
+		pfrm += *plen;
+		rx_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	/* Parse payload. */
+	while (nfrms < CFHSI_MAX_PKTS && *plen) {
+		struct sk_buff *skb;
+		u8 *pcffrm = NULL;
+		int len;
+
+		/* CAIF frame starts after head padding. */
+		pcffrm = pfrm + *pfrm + 1;
+
+		/* Read length of CAIF frame (little endian). */
+		len = *pcffrm;
+		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
+		len += 2;	/* Add FCS fields. */
+
+		/* Sanity check length of CAIF frames. */
+		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
+			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
+				__func__);
+			return -EPROTO;
+		}
+
+		/* Allocate SKB (OK even in IRQ context). */
+		skb = alloc_skb(len + 1, GFP_ATOMIC);
+		if (!skb) {
+			netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
+				__func__);
+			cfhsi->rx_state.nfrms = nfrms;
+			return -ENOMEM;
+		}
+		caif_assert(skb != NULL);
+
+		skb_put_data(skb, pcffrm, len);
+
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfhsi->ndev;
+
+		/*
+		 * We're called in callback from HSI
+		 * and don't know the context we're running in.
+		 */
+		if (in_interrupt())
+			netif_rx(skb);
+		else
+			netif_rx_ni(skb);
+
+		/* Update network statistics. */
+		cfhsi->ndev->stats.rx_packets++;
+		cfhsi->ndev->stats.rx_bytes += len;
+
+		pfrm += *plen;
+		rx_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	return rx_sz;
+}
+
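+/*
+ * RX state machine: a transfer is either a plain descriptor
+ * (CFHSI_RX_STATE_DESC) or a payload whose length was announced by the
+ * previous descriptor. A payload may carry a piggy-backed descriptor at
+ * its tail, announcing the transfer after next so that reception can
+ * continue without idle gaps.
+ */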
+static void cfhsi_rx_done(struct cfhsi *cfhsi)
+{
+	int res;
+	int desc_pld_len = 0, rx_len, rx_state;
+	struct cfhsi_desc *desc = NULL;
+	u8 *rx_ptr, *rx_buf;
+	struct cfhsi_desc *piggy_desc = NULL;
+
+	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
+
+	netdev_dbg(cfhsi->ndev, "%s\n", __func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/* Update inactivity timer if pending. */
+	spin_lock_bh(&cfhsi->lock);
+	mod_timer_pending(&cfhsi->inactivity_timer,
+			jiffies + cfhsi->cfg.inactivity_timeout);
+	spin_unlock_bh(&cfhsi->lock);
+
+	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
+		desc_pld_len = cfhsi_rx_desc_len(desc);
+
+		if (desc_pld_len < 0)
+			goto out_of_sync;
+
+		rx_buf = cfhsi->rx_buf;
+		rx_len = desc_pld_len;
+		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
+			rx_len += CFHSI_DESC_SZ;
+		if (desc_pld_len == 0)
+			rx_buf = cfhsi->rx_flip_buf;
+	} else {
+		rx_buf = cfhsi->rx_flip_buf;
+
+		rx_len = CFHSI_DESC_SZ;
+		if (cfhsi->rx_state.pld_len > 0 &&
+				(desc->header & CFHSI_PIGGY_DESC)) {
+
+			piggy_desc = (struct cfhsi_desc *)
+				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
+						cfhsi->rx_state.pld_len);
+
+			cfhsi->rx_state.piggy_desc = true;
+
+			/* Extract payload len from piggy-backed descriptor. */
+			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
+			if (desc_pld_len < 0)
+				goto out_of_sync;
+
+			if (desc_pld_len > 0) {
+				rx_len = desc_pld_len;
+				if (piggy_desc->header & CFHSI_PIGGY_DESC)
+					rx_len += CFHSI_DESC_SZ;
+			}
+
+			/*
+			 * Copy needed information from the piggy-backed
+			 * descriptor to the descriptor in the start.
+			 */
+			memcpy(rx_buf, (u8 *)piggy_desc,
+					CFHSI_DESC_SHORT_SZ);
+		}
+	}
+
+	if (desc_pld_len) {
+		rx_state = CFHSI_RX_STATE_PAYLOAD;
+		rx_ptr = rx_buf + CFHSI_DESC_SZ;
+	} else {
+		rx_state = CFHSI_RX_STATE_DESC;
+		rx_ptr = rx_buf;
+		rx_len = CFHSI_DESC_SZ;
+	}
+
+	/* Initiate next read */
+	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
+		/* Set up new transfer. */
+		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
+				__func__);
+
+		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
+				cfhsi->ops);
+		if (WARN_ON(res < 0)) {
+			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
+				__func__, res);
+			cfhsi->ndev->stats.rx_errors++;
+			cfhsi->ndev->stats.rx_dropped++;
+		}
+	}
+
+	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
+		/* Extract payload from descriptor */
+		if (cfhsi_rx_desc(desc, cfhsi) < 0)
+			goto out_of_sync;
+	} else {
+		/* Extract payload */
+		if (cfhsi_rx_pld(desc, cfhsi) < 0)
+			goto out_of_sync;
+		if (piggy_desc) {
+			/* Extract any payload in piggyback descriptor. */
+			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
+				goto out_of_sync;
+			/* Mark no embedded frame after extracting it */
+			piggy_desc->offset = 0;
+		}
+	}
+
+	/* Update state info */
+	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
+	cfhsi->rx_state.state = rx_state;
+	cfhsi->rx_ptr = rx_ptr;
+	cfhsi->rx_len = rx_len;
+	cfhsi->rx_state.pld_len = desc_pld_len;
+	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
+
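+	/* Alternate between the two RX buffers (double buffering) so that
+	 * the buffer handed to the HSI driver for the next transfer is not
+	 * the one that was just parsed. */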
+	if (rx_buf != cfhsi->rx_buf)
+		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
+	return;
+
+out_of_sync:
+	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
+	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
+			cfhsi->rx_buf, CFHSI_DESC_SZ);
+	schedule_work(&cfhsi->out_of_sync_work);
+}
+
+static void cfhsi_rx_slowpath(struct timer_list *t)
+{
+	struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
+
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	cfhsi_rx_done(cfhsi);
+}
+
+static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
+{
+	struct cfhsi *cfhsi;
+
+	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
+		wake_up_interruptible(&cfhsi->flush_fifo_wait);
+	else
+		cfhsi_rx_done(cfhsi);
+}
+
+static void cfhsi_wake_up(struct work_struct *work)
+{
+	struct cfhsi *cfhsi = NULL;
+	int res;
+	int len;
+	long ret;
+
+	cfhsi = container_of(work, struct cfhsi, wake_up_work);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
+		/* This happens when wakeup is requested by
+		 * both ends at the same time. */
+		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+		return;
+	}
+
+	/* Activate wake line. */
+	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
+
+	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
+		__func__);
+
+	/* Wait for acknowledge. */
+	ret = CFHSI_WAKE_TOUT;
+	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
+					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
+							&cfhsi->bits), ret);
+	if (unlikely(ret < 0)) {
+		/* Interrupted by signal. */
+		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
+			__func__, ret);
+
+		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
+		return;
+	} else if (!ret) {
+		bool ca_wake = false;
+		size_t fifo_occupancy = 0;
+
+		/* Wakeup timeout */
+		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
+			__func__);
+
+		/* Check the FIFO to see if the modem has sent something. */
+		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
+					&fifo_occupancy));
+
+		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
+				__func__, (unsigned) fifo_occupancy);
+
+		/* Check if we missed the interrupt. */
+		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
+							&ca_wake));
+
+		if (ca_wake) {
+			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
+				__func__);
+
+			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
+			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+
+			/* Continue execution. */
+			goto wake_ack;
+		}
+
+		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
+		return;
+	}
+wake_ack:
+	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
+		__func__);
+
+	/* Clear power up bit. */
+	set_bit(CFHSI_AWAKE, &cfhsi->bits);
+	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+
+	/* Resume read operation. */
+	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
+	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
+
+	if (WARN_ON(res < 0))
+		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
+
+	/* Clear power up acknowledgment. */
+	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+
+	spin_lock_bh(&cfhsi->lock);
+
+	/* Resume transmit if queues are not empty. */
+	if (!cfhsi_tx_queue_len(cfhsi)) {
+		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
+			__func__);
+		/* Start inactivity timer. */
+		mod_timer(&cfhsi->inactivity_timer,
+				jiffies + cfhsi->cfg.inactivity_timeout);
+		spin_unlock_bh(&cfhsi->lock);
+		return;
+	}
+
+	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
+		__func__);
+
+	spin_unlock_bh(&cfhsi->lock);
+
+	/* Create HSI frame. */
+	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
+
+	if (likely(len > 0)) {
+		/* Set up new transfer. */
+		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
+		if (WARN_ON(res < 0)) {
+			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
+				__func__, res);
+			cfhsi_abort_tx(cfhsi);
+		}
+	} else {
+		netdev_err(cfhsi->ndev,
+				"%s: Failed to create HSI frame: %d.\n",
+				__func__, len);
+	}
+}
+
+static void cfhsi_wake_down(struct work_struct *work)
+{
+	long ret;
+	struct cfhsi *cfhsi = NULL;
+	size_t fifo_occupancy = 0;
+	int retry = CFHSI_WAKE_TOUT;
+
+	cfhsi = container_of(work, struct cfhsi, wake_down_work);
+	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/* Deactivate wake line. */
+	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
+
+	/* Wait for acknowledge. */
+	ret = CFHSI_WAKE_TOUT;
+	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
+					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
+							&cfhsi->bits), ret);
+	if (ret < 0) {
+		/* Interrupted by signal. */
+		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
+			__func__, ret);
+		return;
+	} else if (!ret) {
+		bool ca_wake = true;
+
+		/* Timeout */
+		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
+
+		/* Check if we missed the interrupt. */
+		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
+							&ca_wake));
+		if (!ca_wake)
+			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
+				__func__);
+	}
+
+	/* Check FIFO occupancy. */
+	while (retry) {
+		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
+							&fifo_occupancy));
+
+		if (!fifo_occupancy)
+			break;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+		retry--;
+	}
+
+	if (!retry)
+		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
+
+	/* Clear AWAKE condition. */
+	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
+
+	/* Cancel pending RX requests. */
+	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
+}
+
+static void cfhsi_out_of_sync(struct work_struct *work)
+{
+	struct cfhsi *cfhsi = NULL;
+
+	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
+
+	rtnl_lock();
+	dev_close(cfhsi->ndev);
+	rtnl_unlock();
+}
+
+static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
+{
+	struct cfhsi *cfhsi = NULL;
+
+	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+	wake_up_interruptible(&cfhsi->wake_up_wait);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/* Schedule wake up work queue if the peer initiates. */
+	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+}
+
+static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
+{
+	struct cfhsi *cfhsi = NULL;
+
+	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	/* Initiating low power is only permitted by the host (us). */
+	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+	wake_up_interruptible(&cfhsi->wake_down_wait);
+}
+
+static void cfhsi_aggregation_tout(struct timer_list *t)
+{
+	struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
+
+	netdev_dbg(cfhsi->ndev, "%s.\n",
+		__func__);
+
+	cfhsi_start_tx(cfhsi);
+}
+
+static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cfhsi *cfhsi = NULL;
+	int start_xfer = 0;
+	int timer_active;
+	int prio;
+
+	if (!dev)
+		return -EINVAL;
+
+	cfhsi = netdev_priv(dev);
+
+	switch (skb->priority) {
+	case TC_PRIO_BESTEFFORT:
+	case TC_PRIO_FILLER:
+	case TC_PRIO_BULK:
+		prio = CFHSI_PRIO_BEBK;
+		break;
+	case TC_PRIO_INTERACTIVE_BULK:
+		prio = CFHSI_PRIO_VI;
+		break;
+	case TC_PRIO_INTERACTIVE:
+		prio = CFHSI_PRIO_VO;
+		break;
+	case TC_PRIO_CONTROL:
+	default:
+		prio = CFHSI_PRIO_CTL;
+		break;
+	}
+
+	spin_lock_bh(&cfhsi->lock);
+
+	/* Update aggregation statistics  */
+	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
+
+	/* Queue the SKB */
+	skb_queue_tail(&cfhsi->qhead[prio], skb);
+
+	/* Sanity check; xmit should not be called after unregister_netdev */
+	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
+		spin_unlock_bh(&cfhsi->lock);
+		cfhsi_abort_tx(cfhsi);
+		return -EINVAL;
+	}
+
+	/* Send flow off if number of packets is above high water mark. */
+	if (!cfhsi->flow_off_sent &&
+		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
+		cfhsi->cfdev.flowctrl) {
+		cfhsi->flow_off_sent = 1;
+		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
+	}
+
+	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
+		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
+		start_xfer = 1;
+	}
+
+	if (!start_xfer) {
+		/* Send aggregate if it is possible */
+		bool aggregate_ready =
+			cfhsi_can_send_aggregate(cfhsi) &&
+			del_timer(&cfhsi->aggregation_timer) > 0;
+		spin_unlock_bh(&cfhsi->lock);
+		if (aggregate_ready)
+			cfhsi_start_tx(cfhsi);
+		return 0;
+	}
+
+	/* Delete inactivity timer if started. */
+	timer_active = del_timer_sync(&cfhsi->inactivity_timer);
+
+	spin_unlock_bh(&cfhsi->lock);
+
+	if (timer_active) {
+		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+		int len;
+		int res;
+
+		/* Create HSI frame. */
+		len = cfhsi_tx_frm(desc, cfhsi);
+		WARN_ON(!len);
+
+		/* Set up new transfer. */
+		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
+		if (WARN_ON(res < 0)) {
+			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
+				__func__, res);
+			cfhsi_abort_tx(cfhsi);
+		}
+	} else {
+		/* Schedule the wake-up work queue if we initiate. */
+		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+	}
+
+	return 0;
+}
+
+static const struct net_device_ops cfhsi_netdevops;
+
+static void cfhsi_setup(struct net_device *dev)
+{
+	int i;
+	struct cfhsi *cfhsi = netdev_priv(dev);
+	dev->features = 0;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
+	dev->priv_flags |= IFF_NO_QUEUE;
+	dev->needs_free_netdev = true;
+	dev->netdev_ops = &cfhsi_netdevops;
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
+		skb_queue_head_init(&cfhsi->qhead[i]);
+	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+	cfhsi->cfdev.use_frag = false;
+	cfhsi->cfdev.use_stx = false;
+	cfhsi->cfdev.use_fcs = false;
+	cfhsi->ndev = dev;
+	cfhsi->cfg = hsi_default_config;
+}
+
+static int cfhsi_open(struct net_device *ndev)
+{
+	struct cfhsi *cfhsi = netdev_priv(ndev);
+	int res;
+
+	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
+
+	/* Initialize state variables. */
+	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
+
+	/* Set flow info */
+	cfhsi->flow_off_sent = 0;
+
+	/*
+	 * Allocate a TX buffer with the size of an HSI packet descriptor
+	 * and the necessary room for CAIF payload frames.
+	 */
+	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
+	if (!cfhsi->tx_buf) {
+		res = -ENODEV;
+		goto err_alloc_tx;
+	}
+
+	/*
+	 * Allocate a RX buffer with the size of two HSI packet descriptors and
+	 * the necessary room for CAIF payload frames.
+	 */
+	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
+	if (!cfhsi->rx_buf) {
+		res = -ENODEV;
+		goto err_alloc_rx;
+	}
+
+	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
+	if (!cfhsi->rx_flip_buf) {
+		res = -ENODEV;
+		goto err_alloc_rx_flip;
+	}
+
+	/* Initialize aggregation timeout */
+	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
+
+	/* Initialize receive variables. */
+	cfhsi->rx_ptr = cfhsi->rx_buf;
+	cfhsi->rx_len = CFHSI_DESC_SZ;
+
+	/* Initialize spin locks. */
+	spin_lock_init(&cfhsi->lock);
+
+	/* Set up the driver. */
+	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
+	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
+	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
+	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
+
+	/* Initialize the work queues. */
+	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
+	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
+	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
+
+	/* Clear all bit fields. */
+	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
+
+	/* Create work thread. */
+	cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
+	if (!cfhsi->wq) {
+		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
+			__func__);
+		res = -ENODEV;
+		goto err_create_wq;
+	}
+
+	/* Initialize wait queues. */
+	init_waitqueue_head(&cfhsi->wake_up_wait);
+	init_waitqueue_head(&cfhsi->wake_down_wait);
+	init_waitqueue_head(&cfhsi->flush_fifo_wait);
+
+	/* Setup the inactivity timer. */
+	timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
+	/* Setup the slowpath RX timer. */
+	timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
+	/* Setup the aggregation timer. */
+	timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
+
+	/* Activate HSI interface. */
+	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
+	if (res) {
+		netdev_err(cfhsi->ndev,
+			"%s: can't activate HSI interface: %d.\n",
+			__func__, res);
+		goto err_activate;
+	}
+
+	/* Flush FIFO */
+	res = cfhsi_flush_fifo(cfhsi);
+	if (res) {
+		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
+			__func__, res);
+		goto err_net_reg;
+	}
+	return res;
+
+ err_net_reg:
+	cfhsi->ops->cfhsi_down(cfhsi->ops);
+ err_activate:
+	destroy_workqueue(cfhsi->wq);
+ err_create_wq:
+	kfree(cfhsi->rx_flip_buf);
+ err_alloc_rx_flip:
+	kfree(cfhsi->rx_buf);
+ err_alloc_rx:
+	kfree(cfhsi->tx_buf);
+ err_alloc_tx:
+	return res;
+}
+
+static int cfhsi_close(struct net_device *ndev)
+{
+	struct cfhsi *cfhsi = netdev_priv(ndev);
+	u8 *tx_buf, *rx_buf, *flip_buf;
+
+	/* going to shutdown driver */
+	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
+
+	/* Delete timers if pending */
+	del_timer_sync(&cfhsi->inactivity_timer);
+	del_timer_sync(&cfhsi->rx_slowpath_timer);
+	del_timer_sync(&cfhsi->aggregation_timer);
+
+	/* Cancel pending RX request (if any) */
+	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
+
+	/* Destroy workqueue */
+	destroy_workqueue(cfhsi->wq);
+
+	/* Store buffers; they will be freed later. */
+	tx_buf = cfhsi->tx_buf;
+	rx_buf = cfhsi->rx_buf;
+	flip_buf = cfhsi->rx_flip_buf;
+	/* Flush transmit queues. */
+	cfhsi_abort_tx(cfhsi);
+
+	/* Deactivate interface */
+	cfhsi->ops->cfhsi_down(cfhsi->ops);
+
+	/* Free buffers. */
+	kfree(tx_buf);
+	kfree(rx_buf);
+	kfree(flip_buf);
+	return 0;
+}
+
+static void cfhsi_uninit(struct net_device *dev)
+{
+	struct cfhsi *cfhsi = netdev_priv(dev);
+	ASSERT_RTNL();
+	symbol_put(cfhsi_get_device);
+	list_del(&cfhsi->list);
+}
+
+static const struct net_device_ops cfhsi_netdevops = {
+	.ndo_uninit = cfhsi_uninit,
+	.ndo_open = cfhsi_open,
+	.ndo_stop = cfhsi_close,
+	.ndo_start_xmit = cfhsi_xmit
+};
+
+static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
+{
+	int i;
+
+	if (!data) {
+		pr_debug("no params data found\n");
+		return;
+	}
+
+	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
+	/*
+	 * Inactivity timeout in millisecs. Lowest possible value is 1,
+	 * and highest possible is NEXT_TIMER_MAX_DELTA.
+	 */
+	if (data[i]) {
+		u32 inactivity_timeout = nla_get_u32(data[i]);
+		/* Pre-calculate inactivity timeout. */
+		cfhsi->cfg.inactivity_timeout =	inactivity_timeout * HZ / 1000;
+		if (cfhsi->cfg.inactivity_timeout == 0)
+			cfhsi->cfg.inactivity_timeout = 1;
+		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+	}
+
+	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
+	if (data[i])
+		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
+
+	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
+	if (data[i])
+		cfhsi->cfg.head_align = nla_get_u32(data[i]);
+
+	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
+	if (data[i])
+		cfhsi->cfg.tail_align = nla_get_u32(data[i]);
+
+	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
+	if (data[i])
+		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
+
+	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
+	if (data[i])
+		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
+}
+
+static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
+			       struct nlattr *data[],
+			       struct netlink_ext_ack *extack)
+{
+	cfhsi_netlink_parms(data, netdev_priv(dev));
+	netdev_state_change(dev);
+	return 0;
+}
+
+static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
+	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
+	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
+	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
+	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
+	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
+	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
+};
+
+static size_t caif_hsi_get_size(const struct net_device *dev)
+{
+	int i;
+	size_t s = 0;
+	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
+		s += nla_total_size(caif_hsi_policy[i].len);
+	return s;
+}
+
+static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct cfhsi *cfhsi = netdev_priv(dev);
+
+	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
+			cfhsi->cfg.inactivity_timeout) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
+			cfhsi->cfg.aggregation_timeout) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
+			cfhsi->cfg.head_align) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
+			cfhsi->cfg.tail_align) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
+			cfhsi->cfg.q_high_mark) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
+			cfhsi->cfg.q_low_mark))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
+			    struct nlattr *tb[], struct nlattr *data[],
+			    struct netlink_ext_ack *extack)
+{
+	struct cfhsi *cfhsi = NULL;
+	struct cfhsi_ops *(*get_ops)(void);
+
+	ASSERT_RTNL();
+
+	cfhsi = netdev_priv(dev);
+	cfhsi_netlink_parms(data, cfhsi);
+
+	get_ops = symbol_get(cfhsi_get_ops);
+	if (!get_ops) {
+		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Assign the HSI device. */
+	cfhsi->ops = (*get_ops)();
+	if (!cfhsi->ops) {
+		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+		goto err;
+	}
+
+	/* Assign the driver to this HSI device. */
+	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
+	if (register_netdevice(dev)) {
+		pr_warn("%s: caif_hsi device registration failed\n", __func__);
+		goto err;
+	}
+	/* Add CAIF HSI device to list. */
+	list_add_tail(&cfhsi->list, &cfhsi_list);
+
+	return 0;
+err:
+	symbol_put(cfhsi_get_ops);
+	return -ENODEV;
+}
+
+static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
+	.kind		= "cfhsi",
+	.priv_size	= sizeof(struct cfhsi),
+	.setup		= cfhsi_setup,
+	.maxtype	= __IFLA_CAIF_HSI_MAX,
+	.policy		= caif_hsi_policy,
+	.newlink	= caif_hsi_newlink,
+	.changelink	= caif_hsi_changelink,
+	.get_size	= caif_hsi_get_size,
+	.fill_info	= caif_hsi_fill_info,
+};
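+/*
+ * With the "cfhsi" link ops registered, an interface can be created from
+ * user space with iproute2, e.g. (sketch):
+ *	ip link add name cfhsi0 type cfhsi
+ */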
+
+static void __exit cfhsi_exit_module(void)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct cfhsi *cfhsi;
+
+	rtnl_link_unregister(&caif_hsi_link_ops);
+
+	rtnl_lock();
+	list_for_each_safe(list_node, n, &cfhsi_list) {
+		cfhsi = list_entry(list_node, struct cfhsi, list);
+		unregister_netdev(cfhsi->ndev);
+	}
+	rtnl_unlock();
+}
+
+static int __init cfhsi_init_module(void)
+{
+	return rtnl_link_register(&caif_hsi_link_ops);
+}
+
+module_init(cfhsi_init_module);
+module_exit(cfhsi_exit_module);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 0000000..a0f954f
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:	Sjur Brendeland
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brendeland");
+MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
+MODULE_ALIAS_LDISC(N_CAIF);
+
+#define SEND_QUEUE_LOW 10
+#define SEND_QUEUE_HIGH 100
+#define CAIF_SENDING	        1 /* Bit 1 = 0x02 */
+#define CAIF_FLOW_OFF_SENT	4 /* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK	     4096
+#define ON 1
+#define OFF 0
+#define CAIF_MAX_MTU 4096
+
+static DEFINE_SPINLOCK(ser_lock);
+static LIST_HEAD(ser_list);
+static LIST_HEAD(ser_release_list);
+
+static bool ser_loop;
+module_param(ser_loop, bool, 0444);
+MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
+
+static bool ser_use_stx = true;
+module_param(ser_use_stx, bool, 0444);
+MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
+
+static bool ser_use_fcs = true;
+module_param(ser_use_fcs, bool, 0444);
+MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
+
+static int ser_write_chunk = MAX_WRITE_CHUNK;
+module_param(ser_write_chunk, int, 0444);
+MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
+
+static struct dentry *debugfsdir;
+
+static int caif_net_open(struct net_device *dev);
+static int caif_net_close(struct net_device *dev);
+
+struct ser_device {
+	struct caif_dev_common common;
+	struct list_head node;
+	struct net_device *dev;
+	struct sk_buff_head head;
+	struct tty_struct *tty;
+	bool tx_started;
+	unsigned long state;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_tty_dir;
+	struct debugfs_blob_wrapper tx_blob;
+	struct debugfs_blob_wrapper rx_blob;
+	u8 rx_data[128];
+	u8 tx_data[128];
+	u8 tty_status;
+
+#endif
+};
+
+static void caifdev_setup(struct net_device *dev);
+static void ldisc_tx_wakeup(struct tty_struct *tty);
+#ifdef CONFIG_DEBUG_FS
+static inline void update_tty_status(struct ser_device *ser)
+{
+	ser->tty_status =
+		ser->tty->stopped << 5 |
+		ser->tty->flow_stopped << 3 |
+		ser->tty->packet << 2 |
+		ser->tty->port->low_latency << 1;
+}
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+	ser->debugfs_tty_dir =
+			debugfs_create_dir(tty->name, debugfsdir);
+	if (!IS_ERR(ser->debugfs_tty_dir)) {
+		debugfs_create_blob("last_tx_msg", 0400,
+				    ser->debugfs_tty_dir,
+				    &ser->tx_blob);
+
+		debugfs_create_blob("last_rx_msg", 0400,
+				    ser->debugfs_tty_dir,
+				    &ser->rx_blob);
+
+		debugfs_create_x32("ser_state", 0400,
+				   ser->debugfs_tty_dir,
+				   (u32 *)&ser->state);
+
+		debugfs_create_x8("tty_status", 0400,
+				  ser->debugfs_tty_dir,
+				  &ser->tty_status);
+
+	}
+	ser->tx_blob.data = ser->tx_data;
+	ser->tx_blob.size = 0;
+	ser->rx_blob.data = ser->rx_data;
+	ser->rx_blob.size = 0;
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+	debugfs_remove_recursive(ser->debugfs_tty_dir);
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+	if (size > sizeof(ser->rx_data))
+		size = sizeof(ser->rx_data);
+	memcpy(ser->rx_data, data, size);
+	ser->rx_blob.data = ser->rx_data;
+	ser->rx_blob.size = size;
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+	if (size > sizeof(ser->tx_data))
+		size = sizeof(ser->tx_data);
+	memcpy(ser->tx_data, data, size);
+	ser->tx_blob.data = ser->tx_data;
+	ser->tx_blob.size = size;
+}
+#else
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+}
+
+static inline void update_tty_status(struct ser_device *ser)
+{
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+#endif
+
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+			char *flags, int count)
+{
+	struct sk_buff *skb = NULL;
+	struct ser_device *ser;
+	int ret;
+
+	ser = tty->disc_data;
+
+	/*
+	 * NOTE: flags may contain information about break or overrun.
+	 * This is not yet handled.
+	 */
+
+
+	/*
+	 * Workaround for garbage at the start of transmission;
+	 * only enabled when STX handling is disabled.
+	 */
+	if (!ser->common.use_stx && !ser->tx_started) {
+		dev_info(&ser->dev->dev,
+			"Bytes received before initial transmission - "
+			"bytes discarded.\n");
+		return;
+	}
+
+	BUG_ON(ser->dev == NULL);
+
+	/* Get a suitable caif packet and copy in data. */
+	skb = netdev_alloc_skb(ser->dev, count+1);
+	if (skb == NULL)
+		return;
+	skb_put_data(skb, data, count);
+
+	skb->protocol = htons(ETH_P_CAIF);
+	skb_reset_mac_header(skb);
+	debugfs_rx(ser, data, count);
+	/* Push received packet up the stack. */
+	ret = netif_rx_ni(skb);
+	if (!ret) {
+		ser->dev->stats.rx_packets++;
+		ser->dev->stats.rx_bytes += count;
+	} else {
+		++ser->dev->stats.rx_dropped;
+	}
+	update_tty_status(ser);
+}
+
+static int handle_tx(struct ser_device *ser)
+{
+	struct tty_struct *tty;
+	struct sk_buff *skb;
+	int tty_wr, len, room;
+
+	tty = ser->tty;
+	ser->tx_started = true;
+
+	/* Enter critical section */
+	if (test_and_set_bit(CAIF_SENDING, &ser->state))
+		return 0;
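+	/*
+	 * If the bit was already set, another context is draining the queue
+	 * and will also pick up any skb queued before this call.
+	 */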
+
+	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
+	while ((skb = skb_peek(&ser->head)) != NULL) {
+
+		/* Make sure you don't write too much */
+		len = skb->len;
+		room = tty_write_room(tty);
+		if (!room)
+			break;
+		if (room > ser_write_chunk)
+			room = ser_write_chunk;
+		if (len > room)
+			len = room;
+
+		/* Write to tty or loopback */
+		if (!ser_loop) {
+			tty_wr = tty->ops->write(tty, skb->data, len);
+			update_tty_status(ser);
+		} else {
+			tty_wr = len;
+			ldisc_receive(tty, skb->data, NULL, len);
+		}
+		ser->dev->stats.tx_packets++;
+		ser->dev->stats.tx_bytes += tty_wr;
+
+		/* Error on TTY ?! */
+		if (tty_wr < 0)
+			goto error;
+		/* Reduce buffer written, and discard if empty */
+		skb_pull(skb, tty_wr);
+		if (skb->len == 0) {
+			struct sk_buff *tmp = skb_dequeue(&ser->head);
+			WARN_ON(tmp != skb);
+			if (in_interrupt())
+				dev_kfree_skb_irq(skb);
+			else
+				kfree_skb(skb);
+		}
+	}
+	/* Send flow off if queue is empty */
+	if (ser->head.qlen <= SEND_QUEUE_LOW &&
+		test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+		ser->common.flowctrl != NULL)
+		ser->common.flowctrl(ser->dev, ON);
+	clear_bit(CAIF_SENDING, &ser->state);
+	return 0;
+error:
+	clear_bit(CAIF_SENDING, &ser->state);
+	return tty_wr;
+}
+
+static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ser_device *ser;
+
+	BUG_ON(dev == NULL);
+	ser = netdev_priv(dev);
+
+	/* Send flow off once, on high water mark */
+	if (ser->head.qlen > SEND_QUEUE_HIGH &&
+		!test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+		ser->common.flowctrl != NULL)
+		ser->common.flowctrl(ser->dev, OFF);
+
+	skb_queue_tail(&ser->head, skb);
+	return handle_tx(ser);
+}
+
+
+static void ldisc_tx_wakeup(struct tty_struct *tty)
+{
+	struct ser_device *ser;
+
+	ser = tty->disc_data;
+	BUG_ON(ser == NULL);
+	WARN_ON(ser->tty != tty);
+	handle_tx(ser);
+}
+
+
+static void ser_release(struct work_struct *work)
+{
+	struct list_head list;
+	struct ser_device *ser, *tmp;
+
+	spin_lock(&ser_lock);
+	list_replace_init(&ser_release_list, &list);
+	spin_unlock(&ser_lock);
+
+	if (!list_empty(&list)) {
+		rtnl_lock();
+		list_for_each_entry_safe(ser, tmp, &list, node) {
+			dev_close(ser->dev);
+			unregister_netdevice(ser->dev);
+			debugfs_deinit(ser);
+		}
+		rtnl_unlock();
+	}
+}
+
+static DECLARE_WORK(ser_release_work, ser_release);
+
+static int ldisc_open(struct tty_struct *tty)
+{
+	struct ser_device *ser;
+	struct net_device *dev;
+	char name[64];
+	int result;
+
+	/* No write no play */
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
+		return -EPERM;
+
+	/* release devices to avoid name collision */
+	ser_release(NULL);
+
+	result = snprintf(name, sizeof(name), "cf%s", tty->name);
+	if (result >= IFNAMSIZ)
+		return -EINVAL;
+	dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
+			   caifdev_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	ser = netdev_priv(dev);
+	ser->tty = tty_kref_get(tty);
+	ser->dev = dev;
+	debugfs_init(ser, tty);
+	tty->receive_room = N_TTY_BUF_SIZE;
+	tty->disc_data = ser;
+	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	rtnl_lock();
+	result = register_netdevice(dev);
+	if (result) {
+		rtnl_unlock();
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	spin_lock(&ser_lock);
+	list_add(&ser->node, &ser_list);
+	spin_unlock(&ser_lock);
+	rtnl_unlock();
+	netif_stop_queue(dev);
+	update_tty_status(ser);
+	return 0;
+}
+
+static void ldisc_close(struct tty_struct *tty)
+{
+	struct ser_device *ser = tty->disc_data;
+
+	tty_kref_put(ser->tty);
+
+	spin_lock(&ser_lock);
+	list_move(&ser->node, &ser_release_list);
+	spin_unlock(&ser_lock);
+	schedule_work(&ser_release_work);
+}
+
+/* The line discipline structure. */
+static struct tty_ldisc_ops caif_ldisc = {
+	.owner =	THIS_MODULE,
+	.magic =	TTY_LDISC_MAGIC,
+	.name =		"n_caif",
+	.open =		ldisc_open,
+	.close =	ldisc_close,
+	.receive_buf =	ldisc_receive,
+	.write_wakeup =	ldisc_tx_wakeup
+};
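+/*
+ * Sketch of typical usage from user space: a utility such as ldattach
+ * (util-linux) opens the TTY and switches it to this line discipline,
+ * e.g. "ldattach <N_CAIF number> /dev/ttyS0", upon which ldisc_open()
+ * creates a "cf<ttyname>" network interface.
+ */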
+
+static int register_ldisc(void)
+{
+	int result;
+
+	result = tty_register_ldisc(N_CAIF, &caif_ldisc);
+	if (result < 0) {
+		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
+			result);
+		return result;
+	}
+	return result;
+}
+static const struct net_device_ops netdev_ops = {
+	.ndo_open = caif_net_open,
+	.ndo_stop = caif_net_close,
+	.ndo_start_xmit = caif_xmit
+};
+
+static void caifdev_setup(struct net_device *dev)
+{
+	struct ser_device *serdev = netdev_priv(dev);
+
+	dev->features = 0;
+	dev->netdev_ops = &netdev_ops;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu = CAIF_MAX_MTU;
+	dev->priv_flags |= IFF_NO_QUEUE;
+	dev->needs_free_netdev = true;
+	skb_queue_head_init(&serdev->head);
+	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
+	serdev->common.use_frag = true;
+	serdev->common.use_stx = ser_use_stx;
+	serdev->common.use_fcs = ser_use_fcs;
+	serdev->dev = dev;
+}
+
+
+static int caif_net_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int caif_net_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int __init caif_ser_init(void)
+{
+	int ret;
+
+	ret = register_ldisc();
+	debugfsdir = debugfs_create_dir("caif_serial", NULL);
+	return ret;
+}
+
+static void __exit caif_ser_exit(void)
+{
+	spin_lock(&ser_lock);
+	list_splice(&ser_list, &ser_release_list);
+	spin_unlock(&ser_lock);
+	ser_release(NULL);
+	cancel_work_sync(&ser_release_work);
+	tty_unregister_ldisc(N_CAIF);
+	debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_ser_init);
+module_exit(caif_ser_exit);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
new file mode 100644
index 0000000..d28a139
--- /dev/null
+++ b/drivers/net/caif/caif_spi.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:  Daniel Martensson
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_spi.h>
+
+#ifndef CONFIG_CAIF_SPI_SYNC
+#define FLAVOR "Flavour: Vanilla.\n"
+#else
+#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
+#endif /* CONFIG_CAIF_SPI_SYNC */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson");
+MODULE_DESCRIPTION("CAIF SPI driver");
+
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
+static bool spi_loop;
+module_param(spi_loop, bool, 0444);
+MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
+
+/* SPI frame alignment. */
+module_param(spi_frm_align, int, 0444);
+MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
+ */
+module_param(spi_up_head_align, int, 0444);
+MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
+
+module_param(spi_up_tail_align, int, 0444);
+MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
+
+module_param(spi_down_head_align, int, 0444);
+MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
+
+module_param(spi_down_tail_align, int, 0444);
+MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
+
+#ifdef CONFIG_ARM
+#define BYTE_HEX_FMT "%02X"
+#else
+#define BYTE_HEX_FMT "%02hhX"
+#endif
+
+#define SPI_MAX_PAYLOAD_SIZE 4096
+/*
+ * Threshold values for the SPI packet queue. Flow control will be asserted
+ * when the number of packets exceeds HIGH_WATER_MARK. It will not be
+ * de-asserted before the number of packets drops below LOW_WATER_MARK.
+ */
+#define LOW_WATER_MARK   100
+#define HIGH_WATER_MARK  (LOW_WATER_MARK*5)
+
+#ifdef CONFIG_UML
+
+/*
+ * We sometimes use UML for debugging, but it cannot handle
+ * dma_alloc_coherent so we have to wrap it.
+ */
+static inline void *dma_alloc(dma_addr_t *daddr)
+{
+	return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
+}
+
+static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+{
+	kfree(cpu_addr);
+}
+
+#else
+
+static inline void *dma_alloc(dma_addr_t *daddr)
+{
+	return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
+				GFP_KERNEL);
+}
+
+static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+{
+	dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
+}
+#endif	/* CONFIG_UML */
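+/*
+ * Note: dma_alloc_coherent() is called with a NULL struct device above;
+ * this relies on legacy DMA-mapping behaviour, and newer kernels require
+ * a real device here.
+ */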
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DEBUGFS_BUF_SIZE	4096
+
+static struct dentry *dbgfs_root;
+
+static inline void driver_debugfs_create(void)
+{
+	dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
+}
+
+static inline void driver_debugfs_remove(void)
+{
+	debugfs_remove(dbgfs_root);
+}
+
+static inline void dev_debugfs_rem(struct cfspi *cfspi)
+{
+	debugfs_remove(cfspi->dbgfs_frame);
+	debugfs_remove(cfspi->dbgfs_state);
+	debugfs_remove(cfspi->dbgfs_dir);
+}
+
+static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char *buf;
+	int len = 0;
+	ssize_t size;
+	struct cfspi *cfspi = file->private_data;
+
+	buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	/* Print out debug information. */
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"CAIF SPI debug information:\n");
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"STATE: %d\n", cfspi->dbg_state);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Previous CMD: 0x%x\n", cfspi->pcmd);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current CMD: 0x%x\n", cfspi->cmd);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Previous TX len: %d\n", cfspi->tx_ppck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Previous RX len: %d\n", cfspi->rx_ppck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current TX len: %d\n", cfspi->tx_cpck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current RX len: %d\n", cfspi->rx_cpck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Next TX len: %d\n", cfspi->tx_npck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Next RX len: %d\n", cfspi->rx_npck_len);
+
+	if (len > DEBUGFS_BUF_SIZE)
+		len = DEBUGFS_BUF_SIZE;
+
+	size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return size;
+}
+
+static ssize_t print_frame(char *buf, size_t size, char *frm,
+			   size_t count, size_t cut)
+{
+	int len = 0;
+	int i;
+	for (i = 0; i < count; i++) {
+		len += snprintf((buf + len), (size - len),
+					"[0x" BYTE_HEX_FMT "]",
+					frm[i]);
+		if ((i == cut) && (count > (cut * 2))) {
+			/* Fast forward. */
+			i = count - cut;
+			len += snprintf((buf + len), (size - len),
+					"--- %zu bytes skipped ---\n",
+					count - (cut * 2));
+		}
+
+		/* Insert a newline every 10 bytes of output. */
+		if ((!(i % 10)) && i) {
+			len += snprintf((buf + len), (size - len),
+					"\n");
+		}
+	}
+	len += snprintf((buf + len), (size - len), "\n");
+	return len;
+}
+
+static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char *buf;
+	int len = 0;
+	ssize_t size;
+	struct cfspi *cfspi;
+
+	cfspi = file->private_data;
+	buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	/* Print out debug information. */
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current frame:\n");
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
+
+	len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
+			   cfspi->xfer.va_tx[0],
+			   (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Rx data (Len: %d):\n", cfspi->rx_cpck_len);
+
+	len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
+			   cfspi->xfer.va_rx,
+			   (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
+
+	size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return size;
+}
+
+static const struct file_operations dbgfs_state_fops = {
+	.open = simple_open,
+	.read = dbgfs_state,
+	.owner = THIS_MODULE
+};
+
+static const struct file_operations dbgfs_frame_fops = {
+	.open = simple_open,
+	.read = dbgfs_frame,
+	.owner = THIS_MODULE
+};
+
+static inline void dev_debugfs_add(struct cfspi *cfspi)
+{
+	cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
+	cfspi->dbgfs_state = debugfs_create_file("state", 0444,
+						 cfspi->dbgfs_dir, cfspi,
+						 &dbgfs_state_fops);
+	cfspi->dbgfs_frame = debugfs_create_file("frame", 0444,
+						 cfspi->dbgfs_dir, cfspi,
+						 &dbgfs_frame_fops);
+}
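+
+/*
+ * With debugfs mounted (typically at /sys/kernel/debug; the mount point
+ * is an assumption about the running system), each device then exposes
+ * two read-only files, e.g.
+ *   /sys/kernel/debug/cfspi_sspi/<pdev-name>/state
+ *   /sys/kernel/debug/cfspi_sspi/<pdev-name>/frame
+ * The directory names follow from driver_debugfs_create() and
+ * dev_debugfs_add() above.
+ */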
+
+inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
+{
+	cfspi->dbg_state = state;
+};
+#else
+
+static inline void driver_debugfs_create(void)
+{
+}
+
+static inline void driver_debugfs_remove(void)
+{
+}
+
+static inline void dev_debugfs_add(struct cfspi *cfspi)
+{
+}
+
+static inline void dev_debugfs_rem(struct cfspi *cfspi)
+{
+}
+
+inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
+{
+}
+#endif				/* CONFIG_DEBUG_FS */
+
+static LIST_HEAD(cfspi_list);
+static spinlock_t cfspi_list_lock;
+
+/* SPI uplink head alignment. */
+static ssize_t up_head_align_show(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_up_head_align);
+}
+
+static DRIVER_ATTR_RO(up_head_align);
+
+/* SPI uplink tail alignment. */
+static ssize_t up_tail_align_show(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_up_tail_align);
+}
+
+static DRIVER_ATTR_RO(up_tail_align);
+
+/* SPI downlink head alignment. */
+static ssize_t down_head_align_show(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_down_head_align);
+}
+
+static DRIVER_ATTR_RO(down_head_align);
+
+/* SPI downlink tail alignment. */
+static ssize_t down_tail_align_show(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_down_tail_align);
+}
+
+static DRIVER_ATTR_RO(down_tail_align);
+
+/* SPI frame alignment. */
+static ssize_t frame_align_show(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_frm_align);
+}
+
+static DRIVER_ATTR_RO(frame_align);
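+
+/*
+ * The attributes above make the current padding configuration readable
+ * from user space; on a typical system they show up under
+ * /sys/bus/platform/drivers/cfspi_sspi/ (path assumed, derived from the
+ * platform driver name registered in caif_spi_slave.c).
+ */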
+
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
+{
+	u8 *dst = buf;
+	caif_assert(buf);
+
+	if (cfspi->slave && !cfspi->slave_talked)
+		cfspi->slave_talked = true;
+
+	do {
+		struct sk_buff *skb;
+		struct caif_payload_info *info;
+		int spad = 0;
+		int epad;
+
+		skb = skb_dequeue(&cfspi->chead);
+		if (!skb)
+			break;
+
+		/*
+		 * Calculate length of frame including SPI padding.
+		 * The payload position is found in the control buffer.
+		 */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		/*
+		 * Compute head offset i.e. number of bytes to add to
+		 * get the start of the payload aligned.
+		 */
+		if (spi_up_head_align > 1) {
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
+			*dst = (u8)(spad - 1);
+			dst += spad;
+		}
+
+		/* Copy in CAIF frame. */
+		skb_copy_bits(skb, 0, dst, skb->len);
+		dst += skb->len;
+		cfspi->ndev->stats.tx_packets++;
+		cfspi->ndev->stats.tx_bytes += skb->len;
+
+		/*
+		 * Compute tail offset i.e. number of bytes to add to
+		 * get the complete CAIF frame aligned.
+		 */
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
+		dst += epad;
+
+		dev_kfree_skb(skb);
+
+	} while ((dst - buf) < len);
+
+	return dst - buf;
+}
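+
+/*
+ * Illustrative uplink layout produced above, assuming spi_up_head_align
+ * is 4 and info->hdr_len is 2: spad = 1 + PAD_POW2(3, 4) = 2, i.e. one
+ * pad-count byte (holding spad - 1 = 1) plus one filler byte precede the
+ * CAIF frame; epad then pads (skb->len + spad) up to the next multiple
+ * of spi_up_tail_align.
+ */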
+
+int cfspi_xmitlen(struct cfspi *cfspi)
+{
+	struct sk_buff *skb = NULL;
+	int frm_len = 0;
+	int pkts = 0;
+
+	/*
+	 * Decommit previously committed frames, i.e. move them from chead
+	 * back to the front of qhead in their original order.
+	 */
+	while (skb_peek(&cfspi->chead)) {
+		skb = skb_dequeue_tail(&cfspi->chead);
+		skb_queue_head(&cfspi->qhead, skb);
+	}
+
+	do {
+		struct caif_payload_info *info = NULL;
+		int spad = 0;
+		int epad = 0;
+
+		skb = skb_dequeue(&cfspi->qhead);
+		if (!skb)
+			break;
+
+		/*
+		 * Calculate length of frame including SPI padding.
+		 * The payload position is found in the control buffer.
+		 */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		/*
+		 * Compute head offset i.e. number of bytes to add to
+		 * get the start of the payload aligned.
+		 */
+		if (spi_up_head_align > 1)
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
+
+		/*
+		 * Compute tail offset i.e. number of bytes to add to
+		 * get the complete CAIF frame aligned.
+		 */
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
+
+		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
+			skb_queue_tail(&cfspi->chead, skb);
+			pkts++;
+			frm_len += skb->len + spad + epad;
+		} else {
+			/* Put back packet. */
+			skb_queue_head(&cfspi->qhead, skb);
+			break;
+		}
+	} while (pkts <= CAIF_MAX_SPI_PKTS);
+
+	/*
+	 * Send flow on if previously sent flow off
+	 * and now go below the low water mark
+	 */
+	if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
+		cfspi->cfdev.flowctrl) {
+		cfspi->flow_off_sent = 0;
+		cfspi->cfdev.flowctrl(cfspi->ndev, 1);
+	}
+
+	return frm_len;
+}
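+
+/*
+ * Queue discipline: qhead holds packets queued by the stack, chead those
+ * committed to the next SPI frame. cfspi_xmitlen() first moves previously
+ * committed packets back onto qhead and then re-commits as many as fit in
+ * CAIF_MAX_SPI_FRAME, so repeated calls yield a consistent result.
+ */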
+
+static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
+{
+	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+
+	/*
+	 * The slave device is the master on the link. Interrupts before the
+	 * slave has transmitted are considered spurious.
+	 */
+	if (cfspi->slave && !cfspi->slave_talked) {
+		printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+		return;
+	}
+
+	if (!in_interrupt())
+		spin_lock(&cfspi->lock);
+	if (assert) {
+		set_bit(SPI_SS_ON, &cfspi->state);
+		set_bit(SPI_XFER, &cfspi->state);
+	} else {
+		set_bit(SPI_SS_OFF, &cfspi->state);
+	}
+	if (!in_interrupt())
+		spin_unlock(&cfspi->lock);
+
+	/* Wake up the xfer thread. */
+	if (assert)
+		wake_up_interruptible(&cfspi->wait);
+}
+
+static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
+{
+	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+
+	/* Transfer done, complete work queue */
+	complete(&cfspi->comp);
+}
+
+static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cfspi *cfspi = NULL;
+	unsigned long flags;
+	if (!dev)
+		return -EINVAL;
+
+	cfspi = netdev_priv(dev);
+
+	skb_queue_tail(&cfspi->qhead, skb);
+
+	spin_lock_irqsave(&cfspi->lock, flags);
+	if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
+		/* Wake up xfer thread. */
+		wake_up_interruptible(&cfspi->wait);
+	}
+	spin_unlock_irqrestore(&cfspi->lock, flags);
+
+	/* Send flow off if the number of queued packets is above the
+	 * high water mark.
+	 */
+	if (!cfspi->flow_off_sent &&
+		cfspi->qhead.qlen > cfspi->qd_high_mark &&
+		cfspi->cfdev.flowctrl) {
+		cfspi->flow_off_sent = 1;
+		cfspi->cfdev.flowctrl(cfspi->ndev, 0);
+	}
+
+	return 0;
+}
+
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
+{
+	u8 *src = buf;
+
+	caif_assert(buf != NULL);
+
+	do {
+		int res;
+		struct sk_buff *skb = NULL;
+		int spad = 0;
+		int epad = 0;
+		int pkt_len = 0;
+
+		/*
+		 * Compute head offset i.e. number of bytes added to
+		 * get the start of the payload aligned.
+		 */
+		if (spi_down_head_align > 1) {
+			spad = 1 + *src;
+			src += spad;
+		}
+
+		/* Read length of CAIF frame (little endian). */
+		pkt_len = *src;
+		pkt_len |= ((*(src+1)) << 8) & 0xFF00;
+		pkt_len += 2;	/* Add FCS fields. */
+
+		/* Get a suitable caif packet and copy in data. */
+
+		skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
+		caif_assert(skb != NULL);
+
+		skb_put_data(skb, src, pkt_len);
+		src += pkt_len;
+
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+
+		/*
+		 * Push received packet up the stack.
+		 */
+		if (!spi_loop)
+			res = netif_rx_ni(skb);
+		else
+			res = cfspi_xmit(skb, cfspi->ndev);
+
+		if (!res) {
+			cfspi->ndev->stats.rx_packets++;
+			cfspi->ndev->stats.rx_bytes += pkt_len;
+		} else
+			cfspi->ndev->stats.rx_dropped++;
+
+		/*
+		 * Compute tail offset i.e. number of bytes added to
+		 * get the complete CAIF frame aligned.
+		 */
+		epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
+		src += epad;
+	} while ((src - buf) < len);
+
+	return src - buf;
+}
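+
+/*
+ * Downlink parsing above, step by step: an optional pad-count byte (when
+ * spi_down_head_align > 1) followed by that many filler bytes, then a
+ * little-endian 16-bit length to which 2 bytes are added for the FCS
+ * fields, the frame data itself, and finally tail padding up to
+ * spi_down_tail_align.
+ */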
+
+static int cfspi_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int cfspi_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int cfspi_init(struct net_device *dev)
+{
+	int res = 0;
+	struct cfspi *cfspi = netdev_priv(dev);
+
+	/* Set flow info. */
+	cfspi->flow_off_sent = 0;
+	cfspi->qd_low_mark = LOW_WATER_MARK;
+	cfspi->qd_high_mark = HIGH_WATER_MARK;
+
+	/* Set slave info. */
+	if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+		cfspi->slave = true;
+		cfspi->slave_talked = false;
+	} else {
+		cfspi->slave = false;
+		cfspi->slave_talked = false;
+	}
+
+	/* Allocate DMA buffers. */
+	cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+	if (!cfspi->xfer.va_tx[0]) {
+		res = -ENODEV;
+		goto err_dma_alloc_tx_0;
+	}
+
+	cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
+
+	if (!cfspi->xfer.va_rx) {
+		res = -ENODEV;
+		goto err_dma_alloc_rx;
+	}
+
+	/* Initialize the work queue. */
+	INIT_WORK(&cfspi->work, cfspi_xfer);
+
+	/* Initialize spin locks. */
+	spin_lock_init(&cfspi->lock);
+
+	/* Initialize flow control state. */
+	cfspi->flow_stop = false;
+
+	/* Initialize wait queue. */
+	init_waitqueue_head(&cfspi->wait);
+
+	/* Create work thread. */
+	cfspi->wq = create_singlethread_workqueue(dev->name);
+	if (!cfspi->wq) {
+		printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
+		res = -ENODEV;
+		goto err_create_wq;
+	}
+
+	/* Initialize work queue. */
+	init_completion(&cfspi->comp);
+
+	/* Create debugfs entries. */
+	dev_debugfs_add(cfspi);
+
+	/* Set up the ifc. */
+	cfspi->ifc.ss_cb = cfspi_ss_cb;
+	cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
+	cfspi->ifc.priv = cfspi;
+
+	/* Add CAIF SPI device to list. */
+	spin_lock(&cfspi_list_lock);
+	list_add_tail(&cfspi->list, &cfspi_list);
+	spin_unlock(&cfspi_list_lock);
+
+	/* Schedule the work queue. */
+	queue_work(cfspi->wq, &cfspi->work);
+
+	return 0;
+
+ err_create_wq:
+	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+	return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+	struct cfspi *cfspi = netdev_priv(dev);
+
+	/* Remove from list. */
+	spin_lock(&cfspi_list_lock);
+	list_del(&cfspi->list);
+	spin_unlock(&cfspi_list_lock);
+
+	cfspi->ndev = NULL;
+	/* Free DMA buffers. */
+	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+	set_bit(SPI_TERMINATE, &cfspi->state);
+	wake_up_interruptible(&cfspi->wait);
+	destroy_workqueue(cfspi->wq);
+	/* Destroy debugfs directory and files. */
+	dev_debugfs_rem(cfspi);
+}
+
+static const struct net_device_ops cfspi_ops = {
+	.ndo_open = cfspi_open,
+	.ndo_stop = cfspi_close,
+	.ndo_init = cfspi_init,
+	.ndo_uninit = cfspi_uninit,
+	.ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+	struct cfspi *cfspi = netdev_priv(dev);
+	dev->features = 0;
+	dev->netdev_ops = &cfspi_ops;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+	dev->priv_flags |= IFF_NO_QUEUE;
+	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+	dev->needs_free_netdev = true;
+	skb_queue_head_init(&cfspi->qhead);
+	skb_queue_head_init(&cfspi->chead);
+	cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+	cfspi->cfdev.use_frag = false;
+	cfspi->cfdev.use_stx = false;
+	cfspi->cfdev.use_fcs = false;
+	cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+	struct cfspi *cfspi = NULL;
+	struct net_device *ndev;
+	struct cfspi_dev *dev;
+	int res;
+	dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+	if (!dev)
+		return -ENODEV;
+
+	ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
+			    NET_NAME_UNKNOWN, cfspi_setup);
+	if (!ndev)
+		return -ENOMEM;
+
+	cfspi = netdev_priv(ndev);
+	netif_stop_queue(ndev);
+	cfspi->ndev = ndev;
+	cfspi->pdev = pdev;
+
+	/* Assign the SPI device. */
+	cfspi->dev = dev;
+	/* Assign the device ifc to this SPI interface. */
+	dev->ifc = &cfspi->ifc;
+
+	/* Register network device. */
+	res = register_netdev(ndev);
+	if (res) {
+		printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
+		goto err_net_reg;
+	}
+	return res;
+
+ err_net_reg:
+	free_netdev(ndev);
+
+	return res;
+}
+
+int cfspi_spi_remove(struct platform_device *pdev)
+{
+	/* Everything is done in cfspi_uninit(). */
+	return 0;
+}
+
+static void __exit cfspi_exit_module(void)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct cfspi *cfspi = NULL;
+
+	list_for_each_safe(list_node, n, &cfspi_list) {
+		cfspi = list_entry(list_node, struct cfspi, list);
+		unregister_netdev(cfspi->ndev);
+	}
+
+	/* Destroy sysfs files. */
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_head_align);
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_tail_align);
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_head_align);
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_tail_align);
+	driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
+	/* Unregister platform driver. */
+	platform_driver_unregister(&cfspi_spi_driver);
+	/* Destroy debugfs root directory. */
+	driver_debugfs_remove();
+}
+
+static int __init cfspi_init_module(void)
+{
+	int result;
+
+	/* Initialize spin lock. */
+	spin_lock_init(&cfspi_list_lock);
+
+	/* Register platform driver. */
+	result = platform_driver_register(&cfspi_spi_driver);
+	if (result) {
+		printk(KERN_ERR "Could not register platform SPI driver.\n");
+		goto err_dev_register;
+	}
+
+	/* Create sysfs files. */
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_up_head_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 1.\n");
+		goto err_create_up_head_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_up_tail_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 2.\n");
+		goto err_create_up_tail_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_down_head_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 3.\n");
+		goto err_create_down_head_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_down_tail_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 4.\n");
+		goto err_create_down_tail_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_frame_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 5.\n");
+		goto err_create_frame_align;
+	}
+	driver_debugfs_create();
+	return result;
+
+ err_create_frame_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_tail_align);
+ err_create_down_tail_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_head_align);
+ err_create_down_head_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_tail_align);
+ err_create_up_tail_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_head_align);
+ err_create_up_head_align:
+	platform_driver_unregister(&cfspi_spi_driver);
+ err_dev_register:
+	return result;
+}
+
+module_init(cfspi_init_module);
+module_exit(cfspi_exit_module);
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
new file mode 100644
index 0000000..39ba2f8
--- /dev/null
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:  Daniel Martensson
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <net/caif/caif_spi.h>
+
+#ifndef CONFIG_CAIF_SPI_SYNC
+#define SPI_DATA_POS 0
+static inline int forward_to_spi_cmd(struct cfspi *cfspi)
+{
+	return cfspi->rx_cpck_len;
+}
+#else
+#define SPI_DATA_POS SPI_CMD_SZ
+static inline int forward_to_spi_cmd(struct cfspi *cfspi)
+{
+	return 0;
+}
+#endif
+
+int spi_frm_align = 2;
+
+/*
+ * SPI padding options.
+ * Warning: each must be a power of 2 (an & operation is used) and cannot be zero!
+ */
+int spi_up_head_align   = 1 << 1;
+int spi_up_tail_align   = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
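+
+/* With the defaults above: 2-byte uplink head alignment, no uplink tail
+ * padding (alignment 1), 4-byte downlink head and 2-byte downlink tail
+ * alignment.
+ */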
+
+#ifdef CONFIG_DEBUG_FS
+static inline void debugfs_store_prev(struct cfspi *cfspi)
+{
+	/* Store previous command for debugging reasons.*/
+	cfspi->pcmd = cfspi->cmd;
+	/* Store previous transfer. */
+	cfspi->tx_ppck_len = cfspi->tx_cpck_len;
+	cfspi->rx_ppck_len = cfspi->rx_cpck_len;
+}
+#else
+static inline void debugfs_store_prev(struct cfspi *cfspi)
+{
+}
+#endif
+
+void cfspi_xfer(struct work_struct *work)
+{
+	struct cfspi *cfspi;
+	u8 *ptr = NULL;
+	unsigned long flags;
+	int ret;
+	cfspi = container_of(work, struct cfspi, work);
+
+	/* Initialize state. */
+	cfspi->cmd = SPI_CMD_EOT;
+
+	for (;;) {
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);
+
+		/* Wait for master talk or transmit event. */
+		wait_event_interruptible(cfspi->wait,
+				 test_bit(SPI_XFER, &cfspi->state) ||
+				 test_bit(SPI_TERMINATE, &cfspi->state));
+
+		if (test_bit(SPI_TERMINATE, &cfspi->state))
+			return;
+
+#if CFSPI_DBG_PREFILL
+		/* Prefill buffers for easier debugging. */
+		memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
+		memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
+#endif	/* CFSPI_DBG_PREFILL */
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);
+
+		/* Check whether we have a committed frame. */
+		if (cfspi->tx_cpck_len) {
+			int len;
+
+			cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
+
+			/* Copy committed SPI frames after the SPI indication. */
+			ptr = (u8 *) cfspi->xfer.va_tx;
+			ptr += SPI_IND_SZ;
+			len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
+			WARN_ON(len != cfspi->tx_cpck_len);
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);
+
+		/* Get length of next frame to commit. */
+		cfspi->tx_npck_len = cfspi_xmitlen(cfspi);
+
+		WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);
+
+		/*
+		 * Add indication and length at the beginning of the frame,
+		 * using little endian.
+		 */
+		ptr = (u8 *) cfspi->xfer.va_tx;
+		*ptr++ = SPI_CMD_IND;
+		*ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
+		*ptr++ = cfspi->tx_npck_len & 0x00FF;
+		*ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;
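+		/* Byte layout of the header just written:
+		 * [cmd LSB][cmd MSB][len LSB][len MSB].
+		 */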
+
+		/* Calculate length of DMAs. */
+		cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
+		cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;
+
+		/* Add SPI TX frame alignment padding, if necessary. */
+		if (cfspi->tx_cpck_len &&
+			(cfspi->xfer.tx_dma_len % spi_frm_align)) {
+
+			cfspi->xfer.tx_dma_len += spi_frm_align -
+			    (cfspi->xfer.tx_dma_len % spi_frm_align);
+		}
+
+		/* Add SPI RX frame alignment padding, if necessary. */
+		if (cfspi->rx_cpck_len &&
+			(cfspi->xfer.rx_dma_len % spi_frm_align)) {
+
+			cfspi->xfer.rx_dma_len += spi_frm_align -
+			    (cfspi->xfer.rx_dma_len % spi_frm_align);
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);
+
+		/* Start transfer. */
+		ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
+		WARN_ON(ret);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);
+
+		/*
+		 * TODO: We might be able to make an assumption if this is the
+		 * first loop. Make sure that minimum toggle time is respected.
+		 */
+		udelay(MIN_TRANSITION_TIME_USEC);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
+
+		/* Signal that we are ready to receive data. */
+		cfspi->dev->sig_xfer(true, cfspi->dev);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
+
+		/* Wait for transfer completion. */
+		wait_for_completion(&cfspi->comp);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);
+
+		if (cfspi->cmd == SPI_CMD_EOT) {
+			/*
+			 * Clear the master talk bit. An xfer is always at
+			 * least two bursts.
+			 */
+			clear_bit(SPI_SS_ON, &cfspi->state);
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);
+
+		/* Make sure that the minimum toggle time is respected. */
+		if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
+					cfspi->dev->clk_mhz) <
+			MIN_TRANSITION_TIME_USEC) {
+
+			udelay(MIN_TRANSITION_TIME_USEC -
+				SPI_XFER_TIME_USEC
+				(cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);
+
+		/* De-assert transfer signal. */
+		cfspi->dev->sig_xfer(false, cfspi->dev);
+
+		/* Check whether we received a CAIF packet. */
+		if (cfspi->rx_cpck_len) {
+			int len;
+
+			cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);
+
+			/* Parse SPI frame. */
+			ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));
+
+			len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
+			WARN_ON(len != cfspi->rx_cpck_len);
+		}
+
+		/* Check the next SPI command and length. */
+		ptr = (u8 *) cfspi->xfer.va_rx;
+
+		ptr += forward_to_spi_cmd(cfspi);
+
+		cfspi->cmd = *ptr++;
+		cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
+		cfspi->rx_npck_len = *ptr++;
+		cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;
+
+		WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
+		WARN_ON(cfspi->cmd > SPI_CMD_EOT);
+
+		debugfs_store_prev(cfspi);
+
+		/* Check whether the master issued an EOT command. */
+		if (cfspi->cmd == SPI_CMD_EOT) {
+			/* Reset state. */
+			cfspi->tx_cpck_len = 0;
+			cfspi->rx_cpck_len = 0;
+		} else {
+			/* Update state. */
+			cfspi->tx_cpck_len = cfspi->tx_npck_len;
+			cfspi->rx_cpck_len = cfspi->rx_npck_len;
+		}
+
+		/*
+		 * Check whether we need to clear the xfer bit.
+		 * Spin lock needed for packet insertion.
+		 * Test and clear of different bits
+		 * are not supported.
+		 */
+		spin_lock_irqsave(&cfspi->lock, flags);
+		if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
+			&& !test_bit(SPI_SS_ON, &cfspi->state))
+			clear_bit(SPI_XFER, &cfspi->state);
+
+		spin_unlock_irqrestore(&cfspi->lock, flags);
+	}
+}
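+
+/*
+ * State progression of one loop iteration above, as reported through
+ * cfspi_dbg_state(): WAITING -> AWAKE -> [FETCH_PKT] -> GET_NEXT ->
+ * INIT_XFER -> WAIT_ACTIVE -> SIG_ACTIVE -> WAIT_XFER_DONE -> XFER_DONE
+ * -> WAIT_INACTIVE -> SIG_INACTIVE -> [DELIVER_PKT]; states in brackets
+ * are skipped when there is no committed TX/RX frame.
+ */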
+
+struct platform_driver cfspi_spi_driver = {
+	.probe = cfspi_spi_probe,
+	.remove = cfspi_spi_remove,
+	.driver = {
+		   .name = "cfspi_sspi",
+		   .owner = THIS_MODULE,
+		   },
+};
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
new file mode 100644
index 0000000..2814e0d
--- /dev/null
+++ b/drivers/net/caif/caif_virtio.c
@@ -0,0 +1,789 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2013
+ * Authors: Vicram Arv
+ *	    Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *	    Sjur Brendeland
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/virtio.h>
+#include <linux/vringh.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_caif.h>
+#include <linux/virtio_ring.h>
+#include <linux/dma-mapping.h>
+#include <net/caif/caif_dev.h>
+#include <linux/virtio_config.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vicram Arv");
+MODULE_AUTHOR("Sjur Brendeland");
+MODULE_DESCRIPTION("Virtio CAIF Driver");
+
+/* NAPI schedule quota */
+#define CFV_DEFAULT_QUOTA 32
+
+/* Defaults used if virtio config space is unavailable */
+#define CFV_DEF_MTU_SIZE 4096
+#define CFV_DEF_HEADROOM 32
+#define CFV_DEF_TAILROOM 32
+
+/* Required IP header alignment */
+#define IP_HDR_ALIGN 4
+
+/* struct cfv_napi_context - NAPI context info
+ * @riov: IOV holding data read from the ring. Note that riov may
+ *	  still hold data when cfv_rx_poll() returns.
+ * @head: Last descriptor ID we received from vringh_getdesc_kern.
+ *	  We use this to put descriptor back on the used ring. USHRT_MAX is
+ *	  used to indicate invalid head-id.
+ */
+struct cfv_napi_context {
+	struct vringh_kiov riov;
+	unsigned short head;
+};
+
+/* struct cfv_stats - statistics for debugfs
+ * @rx_napi_complete:	Number of NAPI completions (RX)
+ * @rx_napi_resched:	Number of calls where the full quota was used (RX)
+ * @rx_nomem:		Number of SKB alloc failures (RX)
+ * @rx_kicks:		Number of RX kicks
+ * @tx_full_ring:	Number times TX ring was full
+ * @tx_no_mem:		Number of times TX went out of memory
+ * @tx_flow_on:		Number of flow on (TX)
+ * @tx_kicks:		Number of TX kicks
+ */
+struct cfv_stats {
+	u32 rx_napi_complete;
+	u32 rx_napi_resched;
+	u32 rx_nomem;
+	u32 rx_kicks;
+	u32 tx_full_ring;
+	u32 tx_no_mem;
+	u32 tx_flow_on;
+	u32 tx_kicks;
+};
+
+/* struct cfv_info - Caif Virtio control structure
+ * @cfdev:	caif common header
+ * @vdev:	Associated virtio device
+ * @vr_rx:	rx/downlink host vring
+ * @vq_tx:	tx/uplink virtqueue
+ * @ndev:	CAIF link layer device
+ * @watermark_tx: indicates number of free descriptors we need
+ *		to reopen the tx-queues after overload.
+ * @tx_lock:	protects vq_tx from concurrent use
+ * @tx_release_tasklet: Tasklet for freeing consumed TX buffers
+ * @napi:       Napi context used in cfv_rx_poll()
+ * @ctx:        Context data used in cfv_rx_poll()
+ * @tx_hr:	transmit headroom
+ * @rx_hr:	receive headroom
+ * @tx_tr:	transmit tail room
+ * @rx_tr:	receive tail room
+ * @mtu:	transmit max size
+ * @mru:	receive max size
+ * @allocsz:    size of dma memory reserved for TX buffers
+ * @alloc_addr: virtual address to dma memory for TX buffers
+ * @alloc_dma:  dma address to dma memory for TX buffers
+ * @genpool:    Gen Pool used for allocating TX buffers
+ * @reserved_mem: Pointer to memory reserve allocated from genpool
+ * @reserved_size: Size of memory reserve allocated from genpool
+ * @stats:       Statistics exposed in debugfs
+ * @debugfs:    Debugfs dentry for statistic counters
+ */
+struct cfv_info {
+	struct caif_dev_common cfdev;
+	struct virtio_device *vdev;
+	struct vringh *vr_rx;
+	struct virtqueue *vq_tx;
+	struct net_device *ndev;
+	unsigned int watermark_tx;
+	/* Protect access to vq_tx */
+	spinlock_t tx_lock;
+	struct tasklet_struct tx_release_tasklet;
+	struct napi_struct napi;
+	struct cfv_napi_context ctx;
+	u16 tx_hr;
+	u16 rx_hr;
+	u16 tx_tr;
+	u16 rx_tr;
+	u32 mtu;
+	u32 mru;
+	size_t allocsz;
+	void *alloc_addr;
+	dma_addr_t alloc_dma;
+	struct gen_pool *genpool;
+	unsigned long reserved_mem;
+	size_t reserved_size;
+	struct cfv_stats stats;
+	struct dentry *debugfs;
+};
+
+/* struct buf_info - maintains transmit buffer data handle
+ * @size:	size of transmit buffer
+ * @vaddr:	virtual address mapping to allocated memory area
+ */
+struct buf_info {
+	size_t size;
+	u8 *vaddr;
+};
+
+/* Called from virtio device, in IRQ context */
+static void cfv_release_cb(struct virtqueue *vq_tx)
+{
+	struct cfv_info *cfv = vq_tx->vdev->priv;
+
+	++cfv->stats.tx_kicks;
+	tasklet_schedule(&cfv->tx_release_tasklet);
+}
+
+static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
+{
+	if (!buf_info)
+		return;
+	gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
+		      buf_info->size);
+	kfree(buf_info);
+}
+
+/* This is invoked whenever the remote processor has completed processing
+ * a TX msg we just sent, and the buffer has been put back on the used ring.
+ */
+static void cfv_release_used_buf(struct virtqueue *vq_tx)
+{
+	struct cfv_info *cfv = vq_tx->vdev->priv;
+	unsigned long flags;
+
+	BUG_ON(vq_tx != cfv->vq_tx);
+
+	for (;;) {
+		unsigned int len;
+		struct buf_info *buf_info;
+
+		/* Get used buffer from used ring to recycle used descriptors */
+		spin_lock_irqsave(&cfv->tx_lock, flags);
+		buf_info = virtqueue_get_buf(vq_tx, &len);
+		spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+		/* Stop looping if there are no more buffers to free */
+		if (!buf_info)
+			break;
+
+		free_buf_info(cfv, buf_info);
+
+		/* watermark_tx indicates if we previously stopped the tx
+		 * queues. If we have enough free slots in the virtio ring,
+		 * re-establish the memory reserve and open up the tx queues.
+		 */
+		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
+			continue;
+
+		/* Re-establish memory reserve */
+		if (cfv->reserved_mem == 0 && cfv->genpool)
+			cfv->reserved_mem =
+				gen_pool_alloc(cfv->genpool,
+					       cfv->reserved_size);
+
+		/* Open up the tx queues */
+		if (cfv->reserved_mem) {
+			cfv->watermark_tx =
+				virtqueue_get_vring_size(cfv->vq_tx);
+			netif_tx_wake_all_queues(cfv->ndev);
+			/* Buffers are recycled in cfv_netdev_tx, so
+			 * disable notifications when queues are opened.
+			 */
+			virtqueue_disable_cb(cfv->vq_tx);
+			++cfv->stats.tx_flow_on;
+		} else {
+			/* if no memory reserve, wait for more free slots */
+			WARN_ON(cfv->watermark_tx >
+			       virtqueue_get_vring_size(cfv->vq_tx));
+			cfv->watermark_tx +=
+				virtqueue_get_vring_size(cfv->vq_tx) / 4;
+		}
+	}
+}
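+
+/*
+ * Flow-control summary: watermark_tx holds the number of free TX
+ * descriptors needed before the queues are reopened. While the queues
+ * are open it equals the full vring size, so the reopen path above is
+ * skipped; cfv_netdev_tx() lowers it to a quarter of the ring when it
+ * stops the queues, and it is raised again in quarter-ring steps here
+ * whenever the memory reserve cannot be re-established.
+ */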
+
+/* Allocate a SKB and copy packet data to it */
+static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
+					      struct cfv_info *cfv,
+					      u8 *frm, u32 frm_len)
+{
+	struct sk_buff *skb;
+	u32 cfpkt_len, pad_len;
+
+	*err = 0;
+	/* Verify the frame length against the MRU and the down-link
+	 * head and tail room.
+	 */
+	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
+		netdev_err(cfv->ndev,
+			   "Invalid frmlen:%u mru:%u hr:%d tr:%d\n",
+			   frm_len, cfv->mru, cfv->rx_hr,
+			   cfv->rx_tr);
+		*err = -EPROTO;
+		return NULL;
+	}
+
+	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
+	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
+
+	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
+	if (!skb) {
+		*err = -ENOMEM;
+		return NULL;
+	}
+
+	skb_reserve(skb, cfv->rx_hr + pad_len);
+
+	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
+	return skb;
+}
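+
+/*
+ * Alignment example for the code above (illustrative address): if
+ * (frm + cfv->rx_hr) ends in 0x6, pad_len = 0x6 & (IP_HDR_ALIGN - 1) = 2,
+ * and skb_reserve() skips rx_hr + 2 bytes, mirroring the source offset so
+ * the IP header alignment is preserved in the copy.
+ */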
+
+/* Get packets from the host vring */
+static int cfv_rx_poll(struct napi_struct *napi, int quota)
+{
+	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
+	int rxcnt = 0;
+	int err = 0;
+	void *buf;
+	struct sk_buff *skb;
+	struct vringh_kiov *riov = &cfv->ctx.riov;
+	unsigned int skb_len;
+
+	do {
+		skb = NULL;
+
+		/* Put the previous iovec back on the used ring and
+		 * fetch a new iovec if we have processed all elements.
+		 */
+		if (riov->i == riov->used) {
+			if (cfv->ctx.head != USHRT_MAX) {
+				vringh_complete_kern(cfv->vr_rx,
+						     cfv->ctx.head,
+						     0);
+				cfv->ctx.head = USHRT_MAX;
+			}
+
+			err = vringh_getdesc_kern(
+				cfv->vr_rx,
+				riov,
+				NULL,
+				&cfv->ctx.head,
+				GFP_ATOMIC);
+
+			if (err <= 0)
+				goto exit;
+		}
+
+		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
+		/* TODO: Add check on valid buffer address */
+
+		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
+					     riov->iov[riov->i].iov_len);
+		if (unlikely(err))
+			goto exit;
+
+		/* Push received packet up the stack. */
+		skb_len = skb->len;
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfv->ndev;
+		err = netif_receive_skb(skb);
+		if (unlikely(err)) {
+			++cfv->ndev->stats.rx_dropped;
+		} else {
+			++cfv->ndev->stats.rx_packets;
+			cfv->ndev->stats.rx_bytes += skb_len;
+		}
+
+		++riov->i;
+		++rxcnt;
+	} while (rxcnt < quota);
+
+	++cfv->stats.rx_napi_resched;
+	goto out;
+
+exit:
+	switch (err) {
+	case 0:
+		++cfv->stats.rx_napi_complete;
+
+		/* Really out of packets? (stolen from virtio_net) */
+		napi_complete(napi);
+		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
+		    napi_schedule_prep(napi)) {
+			vringh_notify_disable_kern(cfv->vr_rx);
+			__napi_schedule(napi);
+		}
+		break;
+
+	case -ENOMEM:
+		++cfv->stats.rx_nomem;
+		dev_kfree_skb(skb);
+		/* Stop NAPI poll on OOM; we hope to be polled later */
+		napi_complete(napi);
+		vringh_notify_enable_kern(cfv->vr_rx);
+		break;
+
+	default:
+		/* We're doomed, any modem fault is fatal */
+		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
+		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
+		napi_complete(napi);
+		vringh_notify_disable_kern(cfv->vr_rx);
+		netif_carrier_off(cfv->ndev);
+		break;
+	}
+out:
+	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
+		vringh_notify(cfv->vr_rx);
+	return rxcnt;
+}
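+
+/*
+ * The napi_complete()/napi_schedule_prep() dance in the success case
+ * above closes the race where the host adds a buffer between the last
+ * vringh_getdesc_kern() and re-enabling notifications: if re-enabling
+ * notifications reports buffers already pending, polling is rescheduled
+ * immediately.
+ */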
+
+static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
+{
+	struct cfv_info *cfv = vdev->priv;
+
+	++cfv->stats.rx_kicks;
+	vringh_notify_disable_kern(cfv->vr_rx);
+	napi_schedule(&cfv->napi);
+}
+
+static void cfv_destroy_genpool(struct cfv_info *cfv)
+{
+	if (cfv->alloc_addr)
+		dma_free_coherent(cfv->vdev->dev.parent->parent,
+				  cfv->allocsz, cfv->alloc_addr,
+				  cfv->alloc_dma);
+
+	if (!cfv->genpool)
+		return;
+	gen_pool_free(cfv->genpool,  cfv->reserved_mem,
+		      cfv->reserved_size);
+	gen_pool_destroy(cfv->genpool);
+	cfv->genpool = NULL;
+}
+
+static int cfv_create_genpool(struct cfv_info *cfv)
+{
+	int err;
+
+	/* dma_alloc can only allocate whole pages, and we need a more
+	 * fine-grained allocation, so we use genpool. We ask for space needed
+	 * by IP and a full ring. If the dma allocation fails, we retry with a
+	 * smaller allocation size.
+	 */
+	err = -ENOMEM;
+	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
+			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
+	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
+		return -EINVAL;
+
+	for (;;) {
+		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
+			netdev_info(cfv->ndev, "Not enough device memory\n");
+			return -ENOMEM;
+		}
+
+		cfv->alloc_addr = dma_alloc_coherent(
+						cfv->vdev->dev.parent->parent,
+						cfv->allocsz, &cfv->alloc_dma,
+						GFP_ATOMIC);
+		if (cfv->alloc_addr)
+			break;
+
+		cfv->allocsz = (cfv->allocsz * 3) >> 2;
+	}
+
+	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
+		   cfv->allocsz);
+
+	/* Allocate on 128-byte boundaries (1 << 7) */
+	cfv->genpool = gen_pool_create(7, -1);
+	if (!cfv->genpool)
+		goto err;
+
+	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
+				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
+				cfv->allocsz, -1);
+	if (err)
+		goto err;
+
+	/* Reserve some memory for low memory situations. If we hit the roof
+	 * in the memory pool, we stop TX flow and release the reserve.
+	 */
+	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
+	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
+					   cfv->reserved_size);
+	if (!cfv->reserved_mem) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
+	return 0;
+err:
+	cfv_destroy_genpool(cfv);
+	return err;
+}
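+
+/*
+ * Sizing example (illustrative numbers): with a 256-entry TX vring,
+ * ETH_DATA_LEN = 1500 and 32 bytes each of head- and tailroom, the first
+ * attempt asks for 256 * 1564 * 11/10 = 440422 bytes (~430 KiB); each
+ * retry shrinks that by a quarter until the allocation succeeds or the
+ * size drops to num_possible_cpus() * mtu or below.
+ */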
+
+/* Enable the CAIF interface and allocate the memory-pool */
+static int cfv_netdev_open(struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+
+	if (cfv_create_genpool(cfv))
+		return -ENOMEM;
+
+	netif_carrier_on(netdev);
+	napi_enable(&cfv->napi);
+
+	/* Schedule NAPI to read any pending packets */
+	napi_schedule(&cfv->napi);
+	return 0;
+}
+
+/* Disable the CAIF interface and free the memory-pool */
+static int cfv_netdev_close(struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+	unsigned long flags;
+	struct buf_info *buf_info;
+
+	/* Disable interrupts, queues and NAPI polling */
+	netif_carrier_off(netdev);
+	virtqueue_disable_cb(cfv->vq_tx);
+	vringh_notify_disable_kern(cfv->vr_rx);
+	napi_disable(&cfv->napi);
+
+	/* Release any TX buffers on both the used and available rings */
+	cfv_release_used_buf(cfv->vq_tx);
+	spin_lock_irqsave(&cfv->tx_lock, flags);
+	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
+		free_buf_info(cfv, buf_info);
+	spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+	/* Release all dma allocated memory and destroy the pool */
+	cfv_destroy_genpool(cfv);
+	return 0;
+}
+
+/* Allocate a buffer in dma-memory and copy skb to it */
+static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
+						       struct sk_buff *skb,
+						       struct scatterlist *sg)
+{
+	struct caif_payload_info *info = (void *)&skb->cb;
+	struct buf_info *buf_info = NULL;
+	u8 pad_len, hdr_ofs;
+
+	if (!cfv->genpool)
+		goto err;
+
+	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
+		netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
+			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
+		goto err;
+	}
+
+	buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
+	if (unlikely(!buf_info))
+		goto err;
+
+	/* Make the IP header aligned in the buffer */
+	hdr_ofs = cfv->tx_hr + info->hdr_len;
+	pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
+	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
+
+	/* allocate dma memory buffer */
+	buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
+	if (unlikely(!buf_info->vaddr))
+		goto err;
+
+	/* copy skbuf contents to send buffer */
+	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
+	sg_init_one(sg, buf_info->vaddr + pad_len,
+		    skb->len + cfv->tx_hr + cfv->rx_hr);
+
+	return buf_info;
+err:
+	kfree(buf_info);
+	return NULL;
+}
+
+/* Put the CAIF packet on the virtio ring and kick the receiver */
+static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+	struct buf_info *buf_info;
+	struct scatterlist sg;
+	unsigned long flags;
+	bool flow_off = false;
+	int ret;
+
+	/* garbage collect released buffers */
+	cfv_release_used_buf(cfv->vq_tx);
+	spin_lock_irqsave(&cfv->tx_lock, flags);
+
+	/* The flow-off check takes the number of CPUs into account to make
+	 * sure the virtqueue cannot be overfilled under any possible SMP
+	 * condition.
+	 *
+	 * Flow-on is triggered when sufficient buffers have been freed.
+	 */
+	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
+		flow_off = true;
+		cfv->stats.tx_full_ring++;
+	}
+
+	/* If we run out of memory, we release the memory reserve and retry
+	 * allocation.
+	 */
+	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
+	if (unlikely(!buf_info)) {
+		cfv->stats.tx_no_mem++;
+		flow_off = true;
+
+		if (cfv->reserved_mem && cfv->genpool) {
+			gen_pool_free(cfv->genpool,  cfv->reserved_mem,
+				      cfv->reserved_size);
+			cfv->reserved_mem = 0;
+			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
+		}
+	}
+
+	if (unlikely(flow_off)) {
+		/* Turn flow on when a 1/4 of the descriptors are released */
+		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
+		/* Enable notifications of recycled TX buffers */
+		virtqueue_enable_cb(cfv->vq_tx);
+		netif_tx_stop_all_queues(netdev);
+	}
+
+	if (unlikely(!buf_info)) {
+		/* If the memory reserve does its job, this shouldn't happen */
+		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
+		goto err;
+	}
+
+	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
+	if (unlikely((ret < 0))) {
+		/* If flow control works, this shouldn't happen */
+		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
+			    ret);
+		goto err;
+	}
+
+	/* update netdev statistics */
+	cfv->ndev->stats.tx_packets++;
+	cfv->ndev->stats.tx_bytes += skb->len;
+	spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+	/* tell the remote processor it has a pending message to read */
+	virtqueue_kick(cfv->vq_tx);
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+err:
+	spin_unlock_irqrestore(&cfv->tx_lock, flags);
+	cfv->ndev->stats.tx_dropped++;
+	free_buf_info(cfv, buf_info);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static void cfv_tx_release_tasklet(unsigned long drv)
+{
+	struct cfv_info *cfv = (struct cfv_info *)drv;
+	cfv_release_used_buf(cfv->vq_tx);
+}
+
+static const struct net_device_ops cfv_netdev_ops = {
+	.ndo_open = cfv_netdev_open,
+	.ndo_stop = cfv_netdev_close,
+	.ndo_start_xmit = cfv_netdev_tx,
+};
+
+static void cfv_netdev_setup(struct net_device *netdev)
+{
+	netdev->netdev_ops = &cfv_netdev_ops;
+	netdev->type = ARPHRD_CAIF;
+	netdev->tx_queue_len = 100;
+	netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	netdev->mtu = CFV_DEF_MTU_SIZE;
+	netdev->needs_free_netdev = true;
+}
+
+/* Create debugfs counters for the device */
+static inline void debugfs_init(struct cfv_info *cfv)
+{
+	cfv->debugfs =
+		debugfs_create_dir(netdev_name(cfv->ndev), NULL);
+
+	if (IS_ERR(cfv->debugfs))
+		return;
+
+	debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
+			   &cfv->stats.rx_napi_complete);
+	debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
+			   &cfv->stats.rx_napi_resched);
+	debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
+			   &cfv->stats.rx_nomem);
+	debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
+			   &cfv->stats.rx_kicks);
+	debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
+			   &cfv->stats.tx_full_ring);
+	debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
+			   &cfv->stats.tx_no_mem);
+	debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
+			   &cfv->stats.tx_kicks);
+	debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
+			   &cfv->stats.tx_flow_on);
+}
+
+/* Set up CAIF for a virtio device */
+static int cfv_probe(struct virtio_device *vdev)
+{
+	vq_callback_t *vq_cbs = cfv_release_cb;
+	vrh_callback_t *vrh_cbs = cfv_recv;
+	const char *names = "output";
+	const char *cfv_netdev_name = "cfvrt";
+	struct net_device *netdev;
+	struct cfv_info *cfv;
+	int err = -EINVAL;
+
+	netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
+			      NET_NAME_UNKNOWN, cfv_netdev_setup);
+	if (!netdev)
+		return -ENOMEM;
+
+	cfv = netdev_priv(netdev);
+	cfv->vdev = vdev;
+	cfv->ndev = netdev;
+
+	spin_lock_init(&cfv->tx_lock);
+
+	/* Get the RX virtio ring. This is a "host side vring". */
+	err = -ENODEV;
+	if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
+		goto err;
+
+	err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
+	if (err)
+		goto err;
+
+	/* Get the TX virtio ring. This is a "guest side vring". */
+	err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
+	if (err)
+		goto err;
+
+	/* Get the CAIF configuration from virtio config space, if available */
+	if (vdev->config->get) {
+		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+			     &cfv->tx_hr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+			     &cfv->rx_hr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+			     &cfv->tx_tr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+			     &cfv->rx_tr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+			     &cfv->mtu);
+		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+			     &cfv->mru);
+	} else {
+		cfv->tx_hr = CFV_DEF_HEADROOM;
+		cfv->rx_hr = CFV_DEF_HEADROOM;
+		cfv->tx_tr = CFV_DEF_TAILROOM;
+		cfv->rx_tr = CFV_DEF_TAILROOM;
+		cfv->mtu = CFV_DEF_MTU_SIZE;
+		cfv->mru = CFV_DEF_MTU_SIZE;
+	}
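+
+	/* Note: the device exports a single headroom, tailroom and mtu
+	 * value each, which the reads above apply to both directions
+	 * (tx/rx head- and tailroom, mtu/mru).
+	 */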
+
+	netdev->needed_headroom = cfv->tx_hr;
+	netdev->needed_tailroom = cfv->tx_tr;
+
+	/* Disable buffer release interrupts unless we have stopped TX queues */
+	virtqueue_disable_cb(cfv->vq_tx);
+
+	netdev->mtu = cfv->mtu - cfv->tx_tr;
+	vdev->priv = cfv;
+
+	/* Initialize NAPI poll context data */
+	vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
+	cfv->ctx.head = USHRT_MAX;
+	netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
+
+	tasklet_init(&cfv->tx_release_tasklet,
+		     cfv_tx_release_tasklet,
+		     (unsigned long)cfv);
+
+	/* Carrier is off until netdevice is opened */
+	netif_carrier_off(netdev);
+
+	/* register Netdev */
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
+		goto err;
+	}
+
+	debugfs_init(cfv);
+
+	return 0;
+err:
+	netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);
+
+	if (cfv->vr_rx)
+		vdev->vringh_config->del_vrhs(cfv->vdev);
+	if (cfv->vdev)
+		vdev->config->del_vqs(cfv->vdev);
+	free_netdev(netdev);
+	return err;
+}
+
+static void cfv_remove(struct virtio_device *vdev)
+{
+	struct cfv_info *cfv = vdev->priv;
+
+	rtnl_lock();
+	dev_close(cfv->ndev);
+	rtnl_unlock();
+
+	tasklet_kill(&cfv->tx_release_tasklet);
+	debugfs_remove_recursive(cfv->debugfs);
+
+	vringh_kiov_cleanup(&cfv->ctx.riov);
+	vdev->config->reset(vdev);
+	vdev->vringh_config->del_vrhs(cfv->vdev);
+	cfv->vr_rx = NULL;
+	vdev->config->del_vqs(cfv->vdev);
+	unregister_netdev(cfv->ndev);
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static unsigned int features[] = {
+};
+
+static struct virtio_driver caif_virtio_driver = {
+	.feature_table		= features,
+	.feature_table_size	= ARRAY_SIZE(features),
+	.driver.name		= KBUILD_MODNAME,
+	.driver.owner		= THIS_MODULE,
+	.id_table		= id_table,
+	.probe			= cfv_probe,
+	.remove			= cfv_remove,
+};
+
+module_virtio_driver(caif_virtio_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);