v4.19.13 snapshot.
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
new file mode 100644
index 0000000..f932923
--- /dev/null
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -0,0 +1,138 @@
+#
+# TI device configuration
+#
+
+config NET_VENDOR_TI
+	bool "Texas Instruments (TI) devices"
+	default y
+	depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about TI devices. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if NET_VENDOR_TI
+
+config TI_DAVINCI_EMAC
+	tristate "TI DaVinci EMAC Support"
+	depends on ARM && (ARCH_DAVINCI || ARCH_OMAP3) || COMPILE_TEST
+	select TI_DAVINCI_MDIO
+	select TI_DAVINCI_CPDMA
+	select PHYLIB
+	---help---
+	  This driver supports TI's DaVinci Ethernet controller.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_emac.  This is recommended.
+
+config TI_DAVINCI_MDIO
+	tristate "TI DaVinci MDIO Support"
+	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+	select PHYLIB
+	---help---
+	  This driver supports TI's DaVinci MDIO module.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_mdio.  This is recommended.
+
+config TI_DAVINCI_CPDMA
+	tristate "TI DaVinci CPDMA Support"
+	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+	select GENERIC_ALLOCATOR
+	---help---
+	  This driver supports TI's DaVinci CPDMA DMA engine.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_cpdma.  This is recommended.
+
+config TI_CPSW_PHY_SEL
+	bool
+	---help---
+	  This driver supports configuring the PHY mode of the PHYs
+	  connected to the CPSW.
+
+config TI_CPSW_ALE
+	tristate "TI CPSW ALE Support"
+	---help---
+	  This driver supports TI's CPSW ALE module.
+
+config TI_CPSW
+	tristate "TI CPSW Switch Support"
+	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+	select TI_DAVINCI_CPDMA
+	select TI_DAVINCI_MDIO
+	select TI_CPSW_PHY_SEL
+	select TI_CPSW_ALE
+	select MFD_SYSCON
+	select REGMAP
+	---help---
+	  This driver supports TI's CPSW Ethernet Switch.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cpsw.
+
+config TI_CPTS
+	bool "TI Common Platform Time Sync (CPTS) Support"
+	depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
+	depends on POSIX_TIMERS
+	---help---
+	  This driver supports the Common Platform Time Sync unit of
+	  the CPSW Ethernet Switch and Keystone 2 1G/10G Switch Subsystem.
+	  The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
+	  driver offers a PTP Hardware Clock.
+
+config TI_CPTS_MOD
+	tristate
+	depends on TI_CPTS
+	default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+	select NET_PTP_CLASSIFY
+	imply PTP_1588_CLOCK
+	default m
+
+config TI_KEYSTONE_NETCP
+	tristate "TI Keystone NETCP Core Support"
+	select TI_CPSW_ALE
+	select TI_DAVINCI_MDIO
+	depends on OF
+	depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+	---help---
+	  This driver supports TI's Keystone NETCP Core.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called keystone_netcp.
+
+config TI_KEYSTONE_NETCP_ETHSS
+	depends on TI_KEYSTONE_NETCP
+	tristate "TI Keystone NETCP Ethernet subsystem Support"
+	---help---
+	  This driver supports TI's Keystone NETCP Ethernet subsystem.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called keystone_netcp_ethss.
+
+config TLAN
+	tristate "TI ThunderLAN support"
+	depends on (PCI || EISA)
+	---help---
+	  If you have a PCI or EISA Ethernet network card based on the
+	  ThunderLAN chip which is supported by this driver, say Y here.
+
+	  Devices currently supported by this driver are Compaq Netelligent,
+	  Compaq NetFlex and Olicom cards.  Please read the file
+	  <file:Documentation/networking/tlan.txt> for more details.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called tlan.
+
+	  Please email feedback to <torben.mathiasen@compaq.com>.
+
+config CPMAC
+	tristate "TI AR7 CPMAC Ethernet support"
+	depends on AR7
+	select PHYLIB
+	---help---
+	  This driver supports the TI AR7 CPMAC Ethernet controller found
+	  in AR7-based SoCs (e.g. DSL routers).
+
+endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
new file mode 100644
index 0000000..0be551d
--- /dev/null
+++ b/drivers/net/ethernet/ti/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the TI network device drivers.
+#
+
+obj-$(CONFIG_TI_CPSW) += cpsw-common.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
+obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
+obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
+obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
+ti_cpsw-y := cpsw.o
+
+obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
+keystone_netcp-y := netcp_core.o
+obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
new file mode 100644
index 0000000..9b8a30b
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -0,0 +1,1270 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/atomic.h>
+
+#include <asm/mach-ar7/ar7.h>
+
+MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cpmac");
+
+static int debug_level = 8;
+static int dumb_switch;
+
+/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
+module_param(debug_level, int, 0444);
+module_param(dumb_switch, int, 0444);
+
+MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
+MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
+
+#define CPMAC_VERSION "0.5.2"
+/* frame size + 802.1q tag + FCS size */
+#define CPMAC_SKB_SIZE		(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define CPMAC_QUEUES	8
+
+/* Ethernet registers */
+#define CPMAC_TX_CONTROL		0x0004
+#define CPMAC_TX_TEARDOWN		0x0008
+#define CPMAC_RX_CONTROL		0x0014
+#define CPMAC_RX_TEARDOWN		0x0018
+#define CPMAC_MBP			0x0100
+#define MBP_RXPASSCRC			0x40000000
+#define MBP_RXQOS			0x20000000
+#define MBP_RXNOCHAIN			0x10000000
+#define MBP_RXCMF			0x01000000
+#define MBP_RXSHORT			0x00800000
+#define MBP_RXCEF			0x00400000
+#define MBP_RXPROMISC			0x00200000
+#define MBP_PROMISCCHAN(channel)	(((channel) & 0x7) << 16)
+#define MBP_RXBCAST			0x00002000
+#define MBP_BCASTCHAN(channel)		(((channel) & 0x7) << 8)
+#define MBP_RXMCAST			0x00000020
+#define MBP_MCASTCHAN(channel)		((channel) & 0x7)
+#define CPMAC_UNICAST_ENABLE		0x0104
+#define CPMAC_UNICAST_CLEAR		0x0108
+#define CPMAC_MAX_LENGTH		0x010c
+#define CPMAC_BUFFER_OFFSET		0x0110
+#define CPMAC_MAC_CONTROL		0x0160
+#define MAC_TXPTYPE			0x00000200
+#define MAC_TXPACE			0x00000040
+#define MAC_MII				0x00000020
+#define MAC_TXFLOW			0x00000010
+#define MAC_RXFLOW			0x00000008
+#define MAC_MTEST			0x00000004
+#define MAC_LOOPBACK			0x00000002
+#define MAC_FDX				0x00000001
+#define CPMAC_MAC_STATUS		0x0164
+#define MAC_STATUS_QOS			0x00000004
+#define MAC_STATUS_RXFLOW		0x00000002
+#define MAC_STATUS_TXFLOW		0x00000001
+#define CPMAC_TX_INT_ENABLE		0x0178
+#define CPMAC_TX_INT_CLEAR		0x017c
+#define CPMAC_MAC_INT_VECTOR		0x0180
+#define MAC_INT_STATUS			0x00080000
+#define MAC_INT_HOST			0x00040000
+#define MAC_INT_RX			0x00020000
+#define MAC_INT_TX			0x00010000
+#define CPMAC_MAC_EOI_VECTOR		0x0184
+#define CPMAC_RX_INT_ENABLE		0x0198
+#define CPMAC_RX_INT_CLEAR		0x019c
+#define CPMAC_MAC_INT_ENABLE		0x01a8
+#define CPMAC_MAC_INT_CLEAR		0x01ac
+#define CPMAC_MAC_ADDR_LO(channel)	(0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_MID		0x01d0
+#define CPMAC_MAC_ADDR_HI		0x01d4
+#define CPMAC_MAC_HASH_LO		0x01d8
+#define CPMAC_MAC_HASH_HI		0x01dc
+#define CPMAC_TX_PTR(channel)		(0x0600 + (channel) * 4)
+#define CPMAC_RX_PTR(channel)		(0x0620 + (channel) * 4)
+#define CPMAC_TX_ACK(channel)		(0x0640 + (channel) * 4)
+#define CPMAC_RX_ACK(channel)		(0x0660 + (channel) * 4)
+#define CPMAC_REG_END			0x0680
+
+/* Rx/Tx statistics
+ * TODO: use some of them to fill stats in cpmac_stats()
+ */
+#define CPMAC_STATS_RX_GOOD		0x0200
+#define CPMAC_STATS_RX_BCAST		0x0204
+#define CPMAC_STATS_RX_MCAST		0x0208
+#define CPMAC_STATS_RX_PAUSE		0x020c
+#define CPMAC_STATS_RX_CRC		0x0210
+#define CPMAC_STATS_RX_ALIGN		0x0214
+#define CPMAC_STATS_RX_OVER		0x0218
+#define CPMAC_STATS_RX_JABBER		0x021c
+#define CPMAC_STATS_RX_UNDER		0x0220
+#define CPMAC_STATS_RX_FRAG		0x0224
+#define CPMAC_STATS_RX_FILTER		0x0228
+#define CPMAC_STATS_RX_QOSFILTER	0x022c
+#define CPMAC_STATS_RX_OCTETS		0x0230
+
+#define CPMAC_STATS_TX_GOOD		0x0234
+#define CPMAC_STATS_TX_BCAST		0x0238
+#define CPMAC_STATS_TX_MCAST		0x023c
+#define CPMAC_STATS_TX_PAUSE		0x0240
+#define CPMAC_STATS_TX_DEFER		0x0244
+#define CPMAC_STATS_TX_COLLISION	0x0248
+#define CPMAC_STATS_TX_SINGLECOLL	0x024c
+#define CPMAC_STATS_TX_MULTICOLL	0x0250
+#define CPMAC_STATS_TX_EXCESSCOLL	0x0254
+#define CPMAC_STATS_TX_LATECOLL		0x0258
+#define CPMAC_STATS_TX_UNDERRUN		0x025c
+#define CPMAC_STATS_TX_CARRIERSENSE	0x0260
+#define CPMAC_STATS_TX_OCTETS		0x0264
+
+#define cpmac_read(base, reg)		(readl((void __iomem *)(base) + (reg)))
+#define cpmac_write(base, reg, val)	(writel(val, (void __iomem *)(base) + \
+						(reg)))
+
+/* MDIO bus */
+#define CPMAC_MDIO_VERSION		0x0000
+#define CPMAC_MDIO_CONTROL		0x0004
+#define MDIOC_IDLE			0x80000000
+#define MDIOC_ENABLE			0x40000000
+#define MDIOC_PREAMBLE			0x00100000
+#define MDIOC_FAULT			0x00080000
+#define MDIOC_FAULTDETECT		0x00040000
+#define MDIOC_INTTEST			0x00020000
+#define MDIOC_CLKDIV(div)		((div) & 0xff)
+#define CPMAC_MDIO_ALIVE		0x0008
+#define CPMAC_MDIO_LINK			0x000c
+#define CPMAC_MDIO_ACCESS(channel)	(0x0080 + (channel) * 8)
+#define MDIO_BUSY			0x80000000
+#define MDIO_WRITE			0x40000000
+#define MDIO_REG(reg)			(((reg) & 0x1f) << 21)
+#define MDIO_PHY(phy)			(((phy) & 0x1f) << 16)
+#define MDIO_DATA(data)			((data) & 0xffff)
+#define CPMAC_MDIO_PHYSEL(channel)	(0x0084 + (channel) * 8)
+#define PHYSEL_LINKSEL			0x00000040
+#define PHYSEL_LINKINT			0x00000020
+
+struct cpmac_desc {
+	u32 hw_next;
+	u32 hw_data;
+	u16 buflen;
+	u16 bufflags;
+	u16 datalen;
+	u16 dataflags;
+#define CPMAC_SOP			0x8000
+#define CPMAC_EOP			0x4000
+#define CPMAC_OWN			0x2000
+#define CPMAC_EOQ			0x1000
+	struct sk_buff *skb;
+	struct cpmac_desc *next;
+	struct cpmac_desc *prev;
+	dma_addr_t mapping;
+	dma_addr_t data_mapping;
+};
+
+struct cpmac_priv {
+	spinlock_t lock;
+	spinlock_t rx_lock;
+	struct cpmac_desc *rx_head;
+	int ring_size;
+	struct cpmac_desc *desc_ring;
+	dma_addr_t dma_ring;
+	void __iomem *regs;
+	struct mii_bus *mii_bus;
+	char phy_name[MII_BUS_ID_SIZE + 3];
+	int oldlink, oldspeed, oldduplex;
+	u32 msg_enable;
+	struct net_device *dev;
+	struct work_struct reset_work;
+	struct platform_device *pdev;
+	struct napi_struct napi;
+	atomic_t reset_pending;
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_hw_start(struct net_device *dev);
+static void cpmac_hw_stop(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+static void cpmac_dump_regs(struct net_device *dev)
+{
+	int i;
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	for (i = 0; i < CPMAC_REG_END; i += 4) {
+		if (i % 16 == 0) {
+			if (i)
+				printk("\n");
+			printk("%s: reg[%p]:", dev->name, priv->regs + i);
+		}
+		printk(" %08x", cpmac_read(priv->regs, i));
+	}
+	printk("\n");
+}
+
+static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
+{
+	int i;
+
+	printk("%s: desc[%p]:", dev->name, desc);
+	for (i = 0; i < sizeof(*desc) / 4; i++)
+		printk(" %08x", ((u32 *)desc)[i]);
+	printk("\n");
+}
+
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct cpmac_desc *dump = priv->rx_head;
+
+	do {
+		cpmac_dump_desc(dev, dump);
+		dump = dump->next;
+	} while (dump != priv->rx_head);
+}
+
+static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	int i;
+
+	printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+	for (i = 0; i < skb->len; i++) {
+		if (i % 16 == 0) {
+			if (i)
+				printk("\n");
+			printk("%s: data[%p]:", dev->name, skb->data + i);
+		}
+		printk(" %02x", ((u8 *)skb->data)[i]);
+	}
+	printk("\n");
+}
+
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+	u32 val;
+
+	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+		cpu_relax();
+	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
+		    MDIO_PHY(phy_id));
+	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
+		cpu_relax();
+
+	return MDIO_DATA(val);
+}
+
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
+			    int reg, u16 val)
+{
+	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+		cpu_relax();
+	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
+		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+
+	return 0;
+}
+
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+	struct clk *cpmac_clk;
+
+	cpmac_clk = clk_get(&bus->dev, "cpmac");
+	if (IS_ERR(cpmac_clk)) {
+		pr_err("unable to get cpmac clock\n");
+		return PTR_ERR(cpmac_clk);
+	}
+	ar7_device_reset(AR7_RESET_BIT_MDIO);
+	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
+		    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
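+	/* Worked example (illustrative clock rate, not from the datasheet):
+	 * with a 125 MHz cpmac clock the divider is
+	 * 125000000 / 2200000 - 1 = 55, so the MDIO bus runs at
+	 * 125 MHz / (55 + 1) ~= 2.2 MHz.
+	 */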
+
+	return 0;
+}
+
+static struct mii_bus *cpmac_mii;
+
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	u8 tmp;
+	u32 mbp, bit, hash[2] = { 0, };
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	mbp = cpmac_read(priv->regs, CPMAC_MBP);
+	if (dev->flags & IFF_PROMISC) {
+		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
+			    MBP_RXPROMISC);
+	} else {
+		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
+		if (dev->flags & IFF_ALLMULTI) {
+			/* enable all multicast mode */
+			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
+			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
+		} else {
+			/* cpmac uses some strange mac address hashing
+			 * (not crc32)
+			 */
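+			/* Illustrative worked example (hand-computed from
+			 * the XOR folding below, not from TI documentation):
+			 * the multicast address 01:00:5e:00:00:01 folds to
+			 * bit 14, so the loop sets hash[0] |= 1 << 14.
+			 */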
+			netdev_for_each_mc_addr(ha, dev) {
+				bit = 0;
+				tmp = ha->addr[0];
+				bit  ^= (tmp >> 2) ^ (tmp << 4);
+				tmp = ha->addr[1];
+				bit  ^= (tmp >> 4) ^ (tmp << 2);
+				tmp = ha->addr[2];
+				bit  ^= (tmp >> 6) ^ tmp;
+				tmp = ha->addr[3];
+				bit  ^= (tmp >> 2) ^ (tmp << 4);
+				tmp = ha->addr[4];
+				bit  ^= (tmp >> 4) ^ (tmp << 2);
+				tmp = ha->addr[5];
+				bit  ^= (tmp >> 6) ^ tmp;
+				bit &= 0x3f;
+				hash[bit / 32] |= 1 << (bit % 32);
+			}
+
+			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
+			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
+		}
+	}
+}
+
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
+				    struct cpmac_desc *desc)
+{
+	struct sk_buff *skb, *result = NULL;
+
+	if (unlikely(netif_msg_hw(priv)))
+		cpmac_dump_desc(priv->dev, desc);
+	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
+	if (unlikely(!desc->datalen)) {
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			netdev_warn(priv->dev, "rx: spurious interrupt\n");
+
+		return NULL;
+	}
+
+	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
+	if (likely(skb)) {
+		skb_put(desc->skb, desc->datalen);
+		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
+		skb_checksum_none_assert(desc->skb);
+		priv->dev->stats.rx_packets++;
+		priv->dev->stats.rx_bytes += desc->datalen;
+		result = desc->skb;
+		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
+		desc->skb = skb;
+		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
+						    CPMAC_SKB_SIZE,
+						    DMA_FROM_DEVICE);
+		desc->hw_data = (u32)desc->data_mapping;
+		if (unlikely(netif_msg_pktdata(priv))) {
+			netdev_dbg(priv->dev, "received packet:\n");
+			cpmac_dump_skb(priv->dev, result);
+		}
+	} else {
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			netdev_warn(priv->dev,
+				    "low on skbs, dropping packet\n");
+
+		priv->dev->stats.rx_dropped++;
+	}
+
+	desc->buflen = CPMAC_SKB_SIZE;
+	desc->dataflags = CPMAC_OWN;
+
+	return result;
+}
+
+static int cpmac_poll(struct napi_struct *napi, int budget)
+{
+	struct sk_buff *skb;
+	struct cpmac_desc *desc, *restart;
+	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+	int received = 0, processed = 0;
+
+	spin_lock(&priv->rx_lock);
+	if (unlikely(!priv->rx_head)) {
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			netdev_warn(priv->dev, "rx: polling, but no queue\n");
+
+		spin_unlock(&priv->rx_lock);
+		napi_complete(napi);
+		return 0;
+	}
+
+	desc = priv->rx_head;
+	restart = NULL;
+	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+		processed++;
+
+		if ((desc->dataflags & CPMAC_EOQ) != 0) {
+			/* The last update to eoq->hw_next didn't happen
+			 * soon enough, and the receiver stopped here.
+			 * Remember this descriptor so we can restart
+			 * the receiver after freeing some space.
+			 */
+			if (unlikely(restart)) {
+				if (netif_msg_rx_err(priv))
+					netdev_err(priv->dev,
+						   "poll found a duplicate EOQ: %p and %p\n",
+						   restart, desc);
+				goto fatal_error;
+			}
+
+			restart = desc->next;
+		}
+
+		skb = cpmac_rx_one(priv, desc);
+		if (likely(skb)) {
+			netif_receive_skb(skb);
+			received++;
+		}
+		desc = desc->next;
+	}
+
+	if (desc != priv->rx_head) {
+		/* We freed some buffers, but not the whole ring,
+		 * add what we did free to the rx list
+		 */
+		desc->prev->hw_next = (u32)0;
+		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+	}
+
+	/* Optimization: If we did not actually process an EOQ (perhaps because
+	 * of quota limits), check to see if the tail of the queue has EOQ set.
+	 * We should immediately restart in that case so that the receiver can
+	 * restart and run in parallel with more packet processing.
+	 * This lets us handle slightly larger bursts before running
+	 * out of ring space (assuming dev->weight < ring_size)
+	 */
+
+	if (!restart &&
+	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+		    == CPMAC_EOQ &&
+	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+		/* reset EOQ so the poll loop (above) doesn't try to
+		 * restart this when it eventually gets to this descriptor.
+		 */
+		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+		restart = priv->rx_head;
+	}
+
+	if (restart) {
+		priv->dev->stats.rx_errors++;
+		priv->dev->stats.rx_fifo_errors++;
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			netdev_warn(priv->dev, "rx dma ring overrun\n");
+
+		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+			if (netif_msg_drv(priv))
+				netdev_err(priv->dev,
+					   "cpmac_poll is trying to restart rx from a descriptor that's not free: %p\n",
+					   restart);
+			goto fatal_error;
+		}
+
+		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+	}
+
+	priv->rx_head = desc;
+	spin_unlock(&priv->rx_lock);
+	if (unlikely(netif_msg_rx_status(priv)))
+		netdev_dbg(priv->dev, "poll processed %d packets\n", received);
+
+	if (processed == 0) {
+		/* we ran out of packets to read,
+		 * revert to interrupt-driven mode
+		 */
+		napi_complete(napi);
+		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+		return 0;
+	}
+
+	return 1;
+
+fatal_error:
+	/* Something went horribly wrong.
+	 * Reset hardware to try to recover rather than wedging.
+	 */
+	if (netif_msg_drv(priv)) {
+		netdev_err(priv->dev,
+			   "cpmac_poll is confused. Resetting hardware\n");
+		cpmac_dump_all_desc(priv->dev);
+		netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+			   cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+			   cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+	}
+
+	spin_unlock(&priv->rx_lock);
+	napi_complete(napi);
+	netif_tx_stop_all_queues(priv->dev);
+	napi_disable(&priv->napi);
+
+	atomic_inc(&priv->reset_pending);
+	cpmac_hw_stop(priv->dev);
+	if (!schedule_work(&priv->reset_work))
+		atomic_dec(&priv->reset_pending);
+
+	return 0;
+}
+
+static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int queue;
+	unsigned int len;
+	struct cpmac_desc *desc;
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	if (unlikely(atomic_read(&priv->reset_pending)))
+		return NETDEV_TX_BUSY;
+
+	if (unlikely(skb_padto(skb, ETH_ZLEN)))
+		return NETDEV_TX_OK;
+
+	len = max_t(unsigned int, skb->len, ETH_ZLEN);
+	queue = skb_get_queue_mapping(skb);
+	netif_stop_subqueue(dev, queue);
+
+	desc = &priv->desc_ring[queue];
+	if (unlikely(desc->dataflags & CPMAC_OWN)) {
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			netdev_warn(dev, "tx dma ring full\n");
+
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Lock and unlock with an empty critical section; this acts purely
+	 * as a barrier, serializing against other holders of priv->lock
+	 * (e.g. the stats update in cpmac_end_xmit()) before the descriptor
+	 * is handed to the hardware.
+	 */
+	spin_lock(&priv->lock);
+	spin_unlock(&priv->lock);
+	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+	desc->skb = skb;
+	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
+					    DMA_TO_DEVICE);
+	desc->hw_data = (u32)desc->data_mapping;
+	desc->datalen = len;
+	desc->buflen = len;
+	if (unlikely(netif_msg_tx_queued(priv)))
+		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
+	if (unlikely(netif_msg_hw(priv)))
+		cpmac_dump_desc(dev, desc);
+	if (unlikely(netif_msg_pktdata(priv)))
+		cpmac_dump_skb(dev, skb);
+	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
+
+	return NETDEV_TX_OK;
+}
+
+static void cpmac_end_xmit(struct net_device *dev, int queue)
+{
+	struct cpmac_desc *desc;
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	desc = &priv->desc_ring[queue];
+	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
+	if (likely(desc->skb)) {
+		spin_lock(&priv->lock);
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += desc->skb->len;
+		spin_unlock(&priv->lock);
+		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
+				 DMA_TO_DEVICE);
+
+		if (unlikely(netif_msg_tx_done(priv)))
+			netdev_dbg(dev, "sent 0x%p, len=%d\n",
+				   desc->skb, desc->skb->len);
+
+		dev_kfree_skb_irq(desc->skb);
+		desc->skb = NULL;
+		if (__netif_subqueue_stopped(dev, queue))
+			netif_wake_subqueue(dev, queue);
+	} else {
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			netdev_warn(dev, "end_xmit: spurious interrupt\n");
+		if (__netif_subqueue_stopped(dev, queue))
+			netif_wake_subqueue(dev, queue);
+	}
+}
+
+static void cpmac_hw_stop(struct net_device *dev)
+{
+	int i;
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
+
+	ar7_device_reset(pdata->reset_bit);
+	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
+	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
+	for (i = 0; i < 8; i++) {
+		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+	}
+	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
+}
+
+static void cpmac_hw_start(struct net_device *dev)
+{
+	int i;
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
+
+	ar7_device_reset(pdata->reset_bit);
+	for (i = 0; i < 8; i++) {
+		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+	}
+	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
+
+	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
+		    MBP_RXMCAST);
+	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
+	for (i = 0; i < 8; i++)
+		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
+	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
+	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
+		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
+		    (dev->dev_addr[3] << 24));
+	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
+	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
+	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
+	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+
+	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
+	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
+	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
+		    MAC_FDX);
+}
+
+static void cpmac_clear_rx(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct cpmac_desc *desc;
+	int i;
+
+	if (unlikely(!priv->rx_head))
+		return;
+	desc = priv->rx_head;
+	for (i = 0; i < priv->ring_size; i++) {
+		if ((desc->dataflags & CPMAC_OWN) == 0) {
+			if (netif_msg_rx_err(priv) && net_ratelimit())
+				netdev_warn(dev, "packet dropped\n");
+			if (unlikely(netif_msg_hw(priv)))
+				cpmac_dump_desc(dev, desc);
+			desc->dataflags = CPMAC_OWN;
+			dev->stats.rx_dropped++;
+		}
+		desc->hw_next = desc->next->mapping;
+		desc = desc->next;
+	}
+	priv->rx_head->prev->hw_next = 0;
+}
+
+static void cpmac_clear_tx(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (unlikely(!priv->desc_ring))
+		return;
+	for (i = 0; i < CPMAC_QUEUES; i++) {
+		priv->desc_ring[i].dataflags = 0;
+		if (priv->desc_ring[i].skb) {
+			dev_kfree_skb_any(priv->desc_ring[i].skb);
+			priv->desc_ring[i].skb = NULL;
+		}
+	}
+}
+
+static void cpmac_hw_error(struct work_struct *work)
+{
+	struct cpmac_priv *priv =
+		container_of(work, struct cpmac_priv, reset_work);
+
+	spin_lock(&priv->rx_lock);
+	cpmac_clear_rx(priv->dev);
+	spin_unlock(&priv->rx_lock);
+	cpmac_clear_tx(priv->dev);
+	cpmac_hw_start(priv->dev);
+	barrier();
+	atomic_dec(&priv->reset_pending);
+
+	netif_tx_wake_all_queues(priv->dev);
+	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+	int rx_channel = (macstatus >> 8) & 7;
+	int rx_code = (macstatus >> 12) & 15;
+	int tx_channel = (macstatus >> 16) & 7;
+	int tx_code = (macstatus >> 20) & 15;
+
+	if (rx_code || tx_code) {
+		if (netif_msg_drv(priv) && net_ratelimit()) {
+			/* Can't find any documentation on what these
+			 * error codes actually are. So just log them and hope..
+			 */
+			if (rx_code)
+				netdev_warn(dev,
+					    "host error %d on rx channel %d (macstatus %08x), resetting\n",
+					    rx_code, rx_channel, macstatus);
+			if (tx_code)
+				netdev_warn(dev,
+					    "host error %d on tx channel %d (macstatus %08x), resetting\n",
+					    tx_code, tx_channel, macstatus);
+		}
+
+		netif_tx_stop_all_queues(dev);
+		cpmac_hw_stop(dev);
+		if (schedule_work(&priv->reset_work))
+			atomic_inc(&priv->reset_pending);
+		if (unlikely(netif_msg_hw(priv)))
+			cpmac_dump_regs(dev);
+	}
+	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+}
+
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct cpmac_priv *priv;
+	int queue;
+	u32 status;
+
+	priv = netdev_priv(dev);
+
+	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
+
+	if (unlikely(netif_msg_intr(priv)))
+		netdev_dbg(dev, "interrupt status: 0x%08x\n", status);
+
+	if (status & MAC_INT_TX)
+		cpmac_end_xmit(dev, (status & 7));
+
+	if (status & MAC_INT_RX) {
+		queue = (status >> 8) & 7;
+		if (napi_schedule_prep(&priv->napi)) {
+			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
+
+	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+		cpmac_check_status(dev);
+
+	return IRQ_HANDLED;
+}
+
+static void cpmac_tx_timeout(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	spin_lock(&priv->lock);
+	dev->stats.tx_errors++;
+	spin_unlock(&priv->lock);
+	if (netif_msg_tx_err(priv) && net_ratelimit())
+		netdev_warn(dev, "transmit timeout\n");
+
+	atomic_inc(&priv->reset_pending);
+	barrier();
+	cpmac_clear_tx(dev);
+	barrier();
+	atomic_dec(&priv->reset_pending);
+
+	netif_tx_wake_all_queues(priv->dev);
+}
+
+static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	if (!netif_running(dev))
+		return -EINVAL;
+	if (!dev->phydev)
+		return -EINVAL;
+
+	return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static void cpmac_get_ringparam(struct net_device *dev,
+						struct ethtool_ringparam *ring)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	ring->rx_max_pending = 1024;
+	ring->rx_mini_max_pending = 1;
+	ring->rx_jumbo_max_pending = 1;
+	ring->tx_max_pending = 1;
+
+	ring->rx_pending = priv->ring_size;
+	ring->rx_mini_pending = 1;
+	ring->rx_jumbo_pending = 1;
+	ring->tx_pending = 1;
+}
+
+static int cpmac_set_ringparam(struct net_device *dev,
+						struct ethtool_ringparam *ring)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	if (netif_running(dev))
+		return -EBUSY;
+	priv->ring_size = ring->rx_pending;
+
+	return 0;
+}
+
+static void cpmac_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, "cpmac", sizeof(info->driver));
+	strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, "cpmac", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops cpmac_ethtool_ops = {
+	.get_drvinfo = cpmac_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_ringparam = cpmac_get_ringparam,
+	.set_ringparam = cpmac_set_ringparam,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void cpmac_adjust_link(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	int new_state = 0;
+
+	spin_lock(&priv->lock);
+	if (dev->phydev->link) {
+		netif_tx_start_all_queues(dev);
+		if (dev->phydev->duplex != priv->oldduplex) {
+			new_state = 1;
+			priv->oldduplex = dev->phydev->duplex;
+		}
+
+		if (dev->phydev->speed != priv->oldspeed) {
+			new_state = 1;
+			priv->oldspeed = dev->phydev->speed;
+		}
+
+		if (!priv->oldlink) {
+			new_state = 1;
+			priv->oldlink = 1;
+		}
+	} else if (priv->oldlink) {
+		new_state = 1;
+		priv->oldlink = 0;
+		priv->oldspeed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (new_state && netif_msg_link(priv) && net_ratelimit())
+		phy_print_status(dev->phydev);
+
+	spin_unlock(&priv->lock);
+}
+
+static int cpmac_open(struct net_device *dev)
+{
+	int i, size, res;
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct resource *mem;
+	struct cpmac_desc *desc;
+	struct sk_buff *skb;
+
+	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
+		if (netif_msg_drv(priv))
+			netdev_err(dev, "failed to request registers\n");
+
+		res = -ENXIO;
+		goto fail_reserve;
+	}
+
+	priv->regs = ioremap(mem->start, resource_size(mem));
+	if (!priv->regs) {
+		if (netif_msg_drv(priv))
+			netdev_err(dev, "failed to remap registers\n");
+
+		res = -ENXIO;
+		goto fail_remap;
+	}
+
+	size = priv->ring_size + CPMAC_QUEUES;
+	priv->desc_ring = dma_alloc_coherent(&dev->dev,
+					     sizeof(struct cpmac_desc) * size,
+					     &priv->dma_ring,
+					     GFP_KERNEL);
+	if (!priv->desc_ring) {
+		res = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	for (i = 0; i < size; i++)
+		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
+
+	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
+		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
+		if (unlikely(!skb)) {
+			res = -ENOMEM;
+			goto fail_desc;
+		}
+		desc->skb = skb;
+		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+						    CPMAC_SKB_SIZE,
+						    DMA_FROM_DEVICE);
+		desc->hw_data = (u32)desc->data_mapping;
+		desc->buflen = CPMAC_SKB_SIZE;
+		desc->dataflags = CPMAC_OWN;
+		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+		desc->next->prev = desc;
+		desc->hw_next = (u32)desc->next->mapping;
+	}
+
+	priv->rx_head->prev->hw_next = (u32)0;
+
+	res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
+	if (res) {
+		if (netif_msg_drv(priv))
+			netdev_err(dev, "failed to obtain irq\n");
+
+		goto fail_irq;
+	}
+
+	atomic_set(&priv->reset_pending, 0);
+	INIT_WORK(&priv->reset_work, cpmac_hw_error);
+	cpmac_hw_start(dev);
+
+	napi_enable(&priv->napi);
+	dev->phydev->state = PHY_CHANGELINK;
+	phy_start(dev->phydev);
+
+	return 0;
+
+fail_irq:
+fail_desc:
+	for (i = 0; i < priv->ring_size; i++) {
+		if (priv->rx_head[i].skb) {
+			dma_unmap_single(&dev->dev,
+					 priv->rx_head[i].data_mapping,
+					 CPMAC_SKB_SIZE,
+					 DMA_FROM_DEVICE);
+			kfree_skb(priv->rx_head[i].skb);
+		}
+	}
+	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+			  priv->desc_ring, priv->dma_ring);
+
+fail_alloc:
+	iounmap(priv->regs);
+
+fail_remap:
+	release_mem_region(mem->start, resource_size(mem));
+
+fail_reserve:
+	return res;
+}
+
+static int cpmac_stop(struct net_device *dev)
+{
+	int i;
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct resource *mem;
+
+	netif_tx_stop_all_queues(dev);
+
+	cancel_work_sync(&priv->reset_work);
+	napi_disable(&priv->napi);
+	phy_stop(dev->phydev);
+
+	cpmac_hw_stop(dev);
+
+	for (i = 0; i < 8; i++)
+		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
+	cpmac_write(priv->regs, CPMAC_MBP, 0);
+
+	free_irq(dev->irq, dev);
+	iounmap(priv->regs);
+	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+	release_mem_region(mem->start, resource_size(mem));
+	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+	for (i = 0; i < priv->ring_size; i++) {
+		if (priv->rx_head[i].skb) {
+			dma_unmap_single(&dev->dev,
+					 priv->rx_head[i].data_mapping,
+					 CPMAC_SKB_SIZE,
+					 DMA_FROM_DEVICE);
+			kfree_skb(priv->rx_head[i].skb);
+		}
+	}
+
+	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
+			  (CPMAC_QUEUES + priv->ring_size),
+			  priv->desc_ring, priv->dma_ring);
+
+	return 0;
+}
+
+static const struct net_device_ops cpmac_netdev_ops = {
+	.ndo_open		= cpmac_open,
+	.ndo_stop		= cpmac_stop,
+	.ndo_start_xmit		= cpmac_start_xmit,
+	.ndo_tx_timeout		= cpmac_tx_timeout,
+	.ndo_set_rx_mode	= cpmac_set_multicast_list,
+	.ndo_do_ioctl		= cpmac_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+};
+
+static int external_switch;
+
+static int cpmac_probe(struct platform_device *pdev)
+{
+	int rc, phy_id;
+	char mdio_bus_id[MII_BUS_ID_SIZE];
+	struct resource *mem;
+	struct cpmac_priv *priv;
+	struct net_device *dev;
+	struct plat_cpmac_data *pdata;
+	struct phy_device *phydev = NULL;
+
+	pdata = dev_get_platdata(&pdev->dev);
+
+	if (external_switch || dumb_switch) {
+		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
+		phy_id = pdev->id;
+	} else {
+		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+			if (!(pdata->phy_mask & (1 << phy_id)))
+				continue;
+			if (!mdiobus_get_phy(cpmac_mii, phy_id))
+				continue;
+			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
+			break;
+		}
+	}
+
+	if (phy_id == PHY_MAX_ADDR) {
+		dev_err(&pdev->dev,
+			"no PHY present, falling back to switch on MDIO bus 0\n");
+		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
+		phy_id = pdev->id;
+	}
+	mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';
+
+	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
+	if (!dev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	platform_set_drvdata(pdev, dev);
+	priv = netdev_priv(dev);
+
+	priv->pdev = pdev;
+	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	if (!mem) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	dev->irq = platform_get_irq_byname(pdev, "irq");
+
+	dev->netdev_ops = &cpmac_netdev_ops;
+	dev->ethtool_ops = &cpmac_ethtool_ops;
+
+	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->rx_lock);
+	priv->dev = dev;
+	priv->ring_size = 64;
+	priv->msg_enable = netif_msg_init(debug_level, 0xff);
+	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
+
+	snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
+		 mdio_bus_id, phy_id);
+
+	phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+			     PHY_INTERFACE_MODE_MII);
+
+	if (IS_ERR(phydev)) {
+		if (netif_msg_drv(priv))
+			dev_err(&pdev->dev, "Could not attach to PHY\n");
+
+		rc = PTR_ERR(phydev);
+		goto fail;
+	}
+
+	rc = register_netdev(dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Could not register net device\n");
+		goto fail;
+	}
+
+	if (netif_msg_probe(priv)) {
+		dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, mac: %pM\n",
+			 (void *)mem->start, dev->irq, priv->phy_name,
+			 dev->dev_addr);
+	}
+
+	return 0;
+
+fail:
+	free_netdev(dev);
+	return rc;
+}
+
+static int cpmac_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	unregister_netdev(dev);
+	free_netdev(dev);
+
+	return 0;
+}
+
+static struct platform_driver cpmac_driver = {
+	.driver = {
+		.name	= "cpmac",
+	},
+	.probe	= cpmac_probe,
+	.remove = cpmac_remove,
+};
+
+int cpmac_init(void)
+{
+	u32 mask;
+	int i, res;
+
+	cpmac_mii = mdiobus_alloc();
+	if (!cpmac_mii)
+		return -ENOMEM;
+
+	cpmac_mii->name = "cpmac-mii";
+	cpmac_mii->read = cpmac_mdio_read;
+	cpmac_mii->write = cpmac_mdio_write;
+	cpmac_mii->reset = cpmac_mdio_reset;
+
+	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
+
+	if (!cpmac_mii->priv) {
+		pr_err("Can't ioremap mdio registers\n");
+		res = -ENXIO;
+		goto fail_alloc;
+	}
+
+	/* FIXME: unhardcode gpio&reset bits */
+	ar7_gpio_disable(26);
+	ar7_gpio_disable(27);
+	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+	ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+	cpmac_mii->reset(cpmac_mii);
+
+	for (i = 0; i < 300; i++) {
+		mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
+		if (mask)
+			break;
+
+		msleep(10);
+	}
+
+	mask &= 0x7fffffff;
+	if (mask & (mask - 1)) {
+		external_switch = 1;
+		mask = 0;
+	}
+
+	cpmac_mii->phy_mask = ~(mask | 0x80000000);
+	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
+
+	res = mdiobus_register(cpmac_mii);
+	if (res)
+		goto fail_mii;
+
+	res = platform_driver_register(&cpmac_driver);
+	if (res)
+		goto fail_cpmac;
+
+	return 0;
+
+fail_cpmac:
+	mdiobus_unregister(cpmac_mii);
+
+fail_mii:
+	iounmap(cpmac_mii->priv);
+
+fail_alloc:
+	mdiobus_free(cpmac_mii);
+
+	return res;
+}
+
+void cpmac_exit(void)
+{
+	platform_driver_unregister(&cpmac_driver);
+	mdiobus_unregister(cpmac_mii);
+	iounmap(cpmac_mii->priv);
+	mdiobus_free(cpmac_mii);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
new file mode 100644
index 0000000..38d1cc5
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -0,0 +1,104 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "cpsw.h"
+
+#define CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
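+/* Worked example: with offset 0x630 and slave 1 these macros resolve to
+ * control-module registers 0x638 (LO) and 0x63c (HI).
+ */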
+
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+				       int slave, u8 *mac_addr)
+{
+	u32 macid_lsb;
+	u32 macid_msb;
+	struct regmap *syscon;
+
+	syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+	if (IS_ERR(syscon)) {
+		if (PTR_ERR(syscon) == -ENODEV)
+			return 0;
+		return PTR_ERR(syscon);
+	}
+
+	regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lsb);
+	regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_msb);
+
+	mac_addr[0] = (macid_msb >> 16) & 0xff;
+	mac_addr[1] = (macid_msb >> 8)  & 0xff;
+	mac_addr[2] = macid_msb & 0xff;
+	mac_addr[3] = (macid_lsb >> 16) & 0xff;
+	mac_addr[4] = (macid_lsb >> 8)  & 0xff;
+	mac_addr[5] = macid_lsb & 0xff;
+
+	return 0;
+}
+
+static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+				    u8 *mac_addr)
+{
+	u32 macid_lo;
+	u32 macid_hi;
+	struct regmap *syscon;
+
+	syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+	if (IS_ERR(syscon)) {
+		if (PTR_ERR(syscon) == -ENODEV)
+			return 0;
+		return PTR_ERR(syscon);
+	}
+
+	regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lo);
+	regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_hi);
+
+	mac_addr[5] = (macid_lo >> 8) & 0xff;
+	mac_addr[4] = macid_lo & 0xff;
+	mac_addr[3] = (macid_hi >> 24) & 0xff;
+	mac_addr[2] = (macid_hi >> 16) & 0xff;
+	mac_addr[1] = (macid_hi >> 8) & 0xff;
+	mac_addr[0] = macid_hi & 0xff;
+
+	return 0;
+}
+
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
+{
+	if (of_machine_is_compatible("ti,dm8148") ||
+	    of_machine_is_compatible("ti,am33xx"))
+		return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+	if (of_device_is_compatible(dev->of_node, "ti,am3517-emac"))
+		return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr);
+
+	if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
+		return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
+
+	if (of_machine_is_compatible("ti,am43"))
+		return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+	if (of_machine_is_compatible("ti,dra7"))
+		return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
+
+	dev_info(dev, "incompatible machine/device type for reading mac address\n");
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ti_cm_get_macid);
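+/* Usage sketch (hypothetical caller; "pdev" and "ndev" are assumed locals
+ * of such a caller, not part of this file):
+ *
+ *	u8 mac[ETH_ALEN];
+ *
+ *	if (!ti_cm_get_macid(&pdev->dev, 0, mac) && is_valid_ether_addr(mac))
+ *		memcpy(ndev->dev_addr, mac, ETH_ALEN);
+ */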
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 0000000..396e1cd
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,253 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Module Author: Mugunthan V N <mugunthanvnm@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII	0
+#define AM33XX_GMII_SEL_MODE_RMII	1
+#define AM33XX_GMII_SEL_MODE_RGMII	2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN	BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN	BIT(6)
+#define AM33XX_GMII_SEL_RGMII2_IDMODE	BIT(5)
+#define AM33XX_GMII_SEL_RGMII1_IDMODE	BIT(4)
+
+#define GMII_SEL_MODE_MASK		0x3
+
+struct cpsw_phy_sel_priv {
+	struct device	*dev;
+	u32 __iomem	*gmii_sel;
+	bool		rmii_clock_external;
+	void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+			     phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+				 phy_interface_t phy_mode, int slave)
+{
+	u32 reg;
+	u32 mask;
+	u32 mode = 0;
+	bool rgmii_id = false;
+
+	reg = readl(priv->gmii_sel);
+
+	switch (phy_mode) {
+	case PHY_INTERFACE_MODE_RMII:
+		mode = AM33XX_GMII_SEL_MODE_RMII;
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII:
+		mode = AM33XX_GMII_SEL_MODE_RGMII;
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		mode = AM33XX_GMII_SEL_MODE_RGMII;
+		rgmii_id = true;
+		break;
+
+	default:
+		dev_warn(priv->dev,
+			 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
+			phy_modes(phy_mode));
+		/* fallthrough */
+	case PHY_INTERFACE_MODE_MII:
+		mode = AM33XX_GMII_SEL_MODE_MII;
+		break;
+	}
+
+	mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
+	mask |= BIT(slave + 4);
+	mode <<= slave * 2;
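+	/* Worked example (derived from the definitions above): for slave 0
+	 * the mask is 0x3 | BIT(6) | BIT(4) = 0x53, i.e. the two mode bits
+	 * plus the RMII1 clock-source and RGMII1 ID-mode bits.
+	 */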
+
+	if (priv->rmii_clock_external) {
+		if (slave == 0)
+			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+		else
+			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+	}
+
+	if (rgmii_id) {
+		if (slave == 0)
+			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
+		else
+			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
+	}
+
+	reg &= ~mask;
+	reg |= mode;
+
+	writel(reg, priv->gmii_sel);
+}
+
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+				 phy_interface_t phy_mode, int slave)
+{
+	u32 reg;
+	u32 mask;
+	u32 mode = 0;
+
+	reg = readl(priv->gmii_sel);
+
+	switch (phy_mode) {
+	case PHY_INTERFACE_MODE_RMII:
+		mode = AM33XX_GMII_SEL_MODE_RMII;
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		mode = AM33XX_GMII_SEL_MODE_RGMII;
+		break;
+
+	default:
+		dev_warn(priv->dev,
+			 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
+			phy_modes(phy_mode));
+		/* fallthrough */
+	case PHY_INTERFACE_MODE_MII:
+		mode = AM33XX_GMII_SEL_MODE_MII;
+		break;
+	}
+
+	switch (slave) {
+	case 0:
+		mask = GMII_SEL_MODE_MASK;
+		break;
+	case 1:
+		mask = GMII_SEL_MODE_MASK << 4;
+		mode <<= 4;
+		break;
+	default:
+		dev_err(priv->dev, "invalid slave number %d\n", slave);
+		return;
+	}
+
+	if (priv->rmii_clock_external)
+		dev_err(priv->dev, "RMII External clock is not supported\n");
+
+	reg &= ~mask;
+	reg |= mode;
+
+	writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+	struct device_node *node = (struct device_node *)data;
+
+	return dev->of_node == node &&
+		dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+	struct device_node *node;
+	struct cpsw_phy_sel_priv *priv;
+
+	node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
+	if (!node) {
+		node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+		if (!node) {
+			dev_err(dev, "Phy mode driver DT not found\n");
+			return;
+		}
+	}
+
+	dev = bus_find_device(&platform_bus_type, NULL, node, match);
+	if (!dev) {
+		pr_err("unable to find platform device for %pOF\n", node);
+		goto out;
+	}
+
+	priv = dev_get_drvdata(dev);
+
+	priv->cpsw_phy_sel(priv, phy_mode, slave);
+
+	put_device(dev);
+out:
+	of_node_put(node);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+	{
+		.compatible	= "ti,am3352-cpsw-phy-sel",
+		.data		= &cpsw_gmii_sel_am3352,
+	},
+	{
+		.compatible	= "ti,dra7xx-cpsw-phy-sel",
+		.data		= &cpsw_gmii_sel_dra7xx,
+	},
+	{
+		.compatible	= "ti,am43xx-cpsw-phy-sel",
+		.data		= &cpsw_gmii_sel_am3352,
+	},
+	{}
+};
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+	struct resource	*res;
+	const struct of_device_id *of_id;
+	struct cpsw_phy_sel_priv *priv;
+
+	of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+	if (!of_id)
+		return -EINVAL;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+		return -ENOMEM;
+	}
+
+	priv->dev = &pdev->dev;
+	priv->cpsw_phy_sel = of_id->data;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+	priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->gmii_sel))
+		return PTR_ERR(priv->gmii_sel);
+
+	if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+		priv->rmii_clock_external = true;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+	.probe		= cpsw_phy_sel_probe,
+	.driver		= {
+		.name	= "cpsw-phy-sel",
+		.of_match_table = cpsw_phy_sel_id_table,
+	},
+};
+builtin_platform_driver(cpsw_phy_sel_driver);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
new file mode 100644
index 0000000..832bce0
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -0,0 +1,3728 @@
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <net/pkt_cls.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpts.h"
+#include "davinci_cpdma.h"
+
+#include <net/pkt_sched.h>
+
+#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
+			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
+			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
+			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
+			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
+			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
+			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
+			 NETIF_MSG_RX_STATUS)
+
+#define cpsw_info(priv, type, format, ...)		\
+do {								\
+	if (netif_msg_##type(priv) && net_ratelimit())		\
+		dev_info(priv->dev, format, ## __VA_ARGS__);	\
+} while (0)
+
+#define cpsw_err(priv, type, format, ...)		\
+do {								\
+	if (netif_msg_##type(priv) && net_ratelimit())		\
+		dev_err(priv->dev, format, ## __VA_ARGS__);	\
+} while (0)
+
+#define cpsw_dbg(priv, type, format, ...)		\
+do {								\
+	if (netif_msg_##type(priv) && net_ratelimit())		\
+		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
+} while (0)
+
+#define cpsw_notice(priv, type, format, ...)		\
+do {								\
+	if (netif_msg_##type(priv) && net_ratelimit())		\
+		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
+} while (0)
+
+#define ALE_ALL_PORTS		0x7
+
+#define CPSW_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
+#define CPSW_MINOR_VERSION(reg)		((reg) & 0xff)
+#define CPSW_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
+
+#define CPSW_VERSION_1		0x19010a
+#define CPSW_VERSION_2		0x19010c
+#define CPSW_VERSION_3		0x19010f
+#define CPSW_VERSION_4		0x190112
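+/* Decoding example (per the macros above): an id_ver of 0x19010c
+ * (CPSW_VERSION_2) reads back as major 1, minor 12 (0x0c).
+ */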
+
+#define HOST_PORT_NUM		0
+#define CPSW_ALE_PORTS_NUM	3
+#define SLIVER_SIZE		0x40
+
+#define CPSW1_HOST_PORT_OFFSET	0x028
+#define CPSW1_SLAVE_OFFSET	0x050
+#define CPSW1_SLAVE_SIZE	0x040
+#define CPSW1_CPDMA_OFFSET	0x100
+#define CPSW1_STATERAM_OFFSET	0x200
+#define CPSW1_HW_STATS		0x400
+#define CPSW1_CPTS_OFFSET	0x500
+#define CPSW1_ALE_OFFSET	0x600
+#define CPSW1_SLIVER_OFFSET	0x700
+
+#define CPSW2_HOST_PORT_OFFSET	0x108
+#define CPSW2_SLAVE_OFFSET	0x200
+#define CPSW2_SLAVE_SIZE	0x100
+#define CPSW2_CPDMA_OFFSET	0x800
+#define CPSW2_HW_STATS		0x900
+#define CPSW2_STATERAM_OFFSET	0xa00
+#define CPSW2_CPTS_OFFSET	0xc00
+#define CPSW2_ALE_OFFSET	0xd00
+#define CPSW2_SLIVER_OFFSET	0xd80
+#define CPSW2_BD_OFFSET		0x2000
+
+#define CPDMA_RXTHRESH		0x0c0
+#define CPDMA_RXFREE		0x0e0
+#define CPDMA_TXHDP		0x00
+#define CPDMA_RXHDP		0x20
+#define CPDMA_TXCP		0x40
+#define CPDMA_RXCP		0x60
+
+#define CPSW_POLL_WEIGHT	64
+#define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
+#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
+				 ETH_FCS_LEN +\
+				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)
+
+#define RX_PRIORITY_MAPPING	0x76543210
+#define TX_PRIORITY_MAPPING	0x33221100
+#define CPDMA_TX_PRIORITY_MAP	0x76543210
+
+#define CPSW_VLAN_AWARE		BIT(1)
+#define CPSW_RX_VLAN_ENCAP	BIT(2)
+#define CPSW_ALE_VLAN_AWARE	1
+
+#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
+#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
+#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)
+
+#define CPSW_INTPACEEN		(0x3f << 16)
+#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
+#define CPSW_CMINTMAX_CNT	63
+#define CPSW_CMINTMIN_CNT	2
+#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)
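+/* Worked example (follows from the constants above): the usable pacing
+ * interval range is CPSW_CMINTMIN_INTVL = (1000 / 63) + 1 = 16 up to
+ * CPSW_CMINTMAX_INTVL = 1000 / 2 = 500 (microseconds, as used by the
+ * rx/tx coalescing parameters).
+ */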
+
+#define cpsw_slave_index(cpsw, priv)				\
+		((cpsw->data.dual_emac) ? priv->emac_port :	\
+		cpsw->data.active_slave)
+#define IRQ_NUM			2
+#define CPSW_MAX_QUEUES		8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
+#define CPSW_FIFO_SHAPE_EN_SHIFT	16
+#define CPSW_FIFO_RATE_EN_SHIFT		20
+#define CPSW_TC_NUM			4
+#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK			0x7f
+
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
+#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
+enum {
+	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
+	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
+	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
+	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
+};
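+/* Decoding sketch for the 32-bit RX VLAN encapsulation word (illustrative
+ * use of the shift/mask definitions above; "encap" is an assumed local):
+ *
+ *	prio = (encap >> CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
+ *	       CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
+ *	type = (encap >> CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
+ *	       CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
+ *	vid  = (encap >> CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & VLAN_VID_MASK;
+ */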
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
+
+static int ale_ageout = 10;
+module_param(ale_ageout, int, 0);
+MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
+
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+module_param(rx_packet_max, int, 0);
+MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
+
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+module_param(descs_pool_size, int, 0444);
+MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
+
+struct cpsw_wr_regs {
+	u32	id_ver;
+	u32	soft_reset;
+	u32	control;
+	u32	int_control;
+	u32	rx_thresh_en;
+	u32	rx_en;
+	u32	tx_en;
+	u32	misc_en;
+	u32	mem_align1[8];
+	u32	rx_thresh_stat;
+	u32	rx_stat;
+	u32	tx_stat;
+	u32	misc_stat;
+	u32	mem_align2[8];
+	u32	rx_imax;
+	u32	tx_imax;
+};
+
+struct cpsw_ss_regs {
+	u32	id_ver;
+	u32	control;
+	u32	soft_reset;
+	u32	stat_port_en;
+	u32	ptype;
+	u32	soft_idle;
+	u32	thru_rate;
+	u32	gap_thresh;
+	u32	tx_start_wds;
+	u32	flow_control;
+	u32	vlan_ltype;
+	u32	ts_ltype;
+	u32	dlr_ltype;
+};
+
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL       0x00 /* Control Register */
+#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
+#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
+#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */
+
+#define CTRL_V2_TS_BITS \
+	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)
+
+#define CTRL_V3_TS_BITS \
+	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+	 TS_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK      (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN		BIT(0)
+#define CPSW_V1_TS_TX_EN		BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS		16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT	16
+
+#define CPSW_MAX_BLKS_TX		15
+#define CPSW_MAX_BLKS_TX_SHIFT		4
+#define CPSW_MAX_BLKS_RX		5
+
+struct cpsw_host_regs {
+	u32	max_blks;
+	u32	blk_cnt;
+	u32	tx_in_ctl;
+	u32	port_vlan;
+	u32	tx_pri_map;
+	u32	cpdma_tx_pri_map;
+	u32	cpdma_rx_chan_map;
+};
+
+struct cpsw_sliver_regs {
+	u32	id_ver;
+	u32	mac_control;
+	u32	mac_status;
+	u32	soft_reset;
+	u32	rx_maxlen;
+	u32	__reserved_0;
+	u32	rx_pause;
+	u32	tx_pause;
+	u32	__reserved_1;
+	u32	rx_pri_map;
+};
+
+struct cpsw_hw_stats {
+	u32	rxgoodframes;
+	u32	rxbroadcastframes;
+	u32	rxmulticastframes;
+	u32	rxpauseframes;
+	u32	rxcrcerrors;
+	u32	rxaligncodeerrors;
+	u32	rxoversizedframes;
+	u32	rxjabberframes;
+	u32	rxundersizedframes;
+	u32	rxfragments;
+	u32	__pad_0[2];
+	u32	rxoctets;
+	u32	txgoodframes;
+	u32	txbroadcastframes;
+	u32	txmulticastframes;
+	u32	txpauseframes;
+	u32	txdeferredframes;
+	u32	txcollisionframes;
+	u32	txsinglecollframes;
+	u32	txmultcollframes;
+	u32	txexcessivecollisions;
+	u32	txlatecollisions;
+	u32	txunderrun;
+	u32	txcarriersenseerrors;
+	u32	txoctets;
+	u32	octetframes64;
+	u32	octetframes65t127;
+	u32	octetframes128t255;
+	u32	octetframes256t511;
+	u32	octetframes512t1023;
+	u32	octetframes1024tup;
+	u32	netoctets;
+	u32	rxsofoverruns;
+	u32	rxmofoverruns;
+	u32	rxdmaoverruns;
+};
+
+struct cpsw_slave_data {
+	struct device_node *phy_node;
+	char		phy_id[MII_BUS_ID_SIZE];
+	int		phy_if;
+	u8		mac_addr[ETH_ALEN];
+	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+	struct cpsw_slave_data	*slave_data;
+	u32	ss_reg_ofs;	/* Subsystem control register offset */
+	u32	channels;	/* number of cpdma channels (symmetric) */
+	u32	slaves;		/* number of slave cpgmac ports */
+	u32	active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+	u32	ale_entries;	/* ale table size */
+	u32	bd_ram_size;	/* buffer descriptor ram size */
+	u32	mac_control;	/* Mac control register */
+	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
+	bool	dual_emac;	/* Enable Dual EMAC mode */
+};
+
+struct cpsw_slave {
+	void __iomem			*regs;
+	struct cpsw_sliver_regs __iomem	*sliver;
+	int				slave_num;
+	u32				mac_control;
+	struct cpsw_slave_data		*data;
+	struct phy_device		*phy;
+	struct net_device		*ndev;
+	u32				port_vlan;
+};
+
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+	return readl_relaxed(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+	writel_relaxed(val, slave->regs + offset);
+}
+
+struct cpsw_vector {
+	struct cpdma_chan *ch;
+	int budget;
+};
+
+struct cpsw_common {
+	struct device			*dev;
+	struct cpsw_platform_data	data;
+	struct napi_struct		napi_rx;
+	struct napi_struct		napi_tx;
+	struct cpsw_ss_regs __iomem	*regs;
+	struct cpsw_wr_regs __iomem	*wr_regs;
+	u8 __iomem			*hw_stats;
+	struct cpsw_host_regs __iomem	*host_port_regs;
+	u32				version;
+	u32				coal_intvl;
+	u32				bus_freq_mhz;
+	int				rx_packet_max;
+	struct cpsw_slave		*slaves;
+	struct cpdma_ctlr		*dma;
+	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
+	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
+	struct cpsw_ale			*ale;
+	bool				quirk_irq;
+	bool				rx_irq_disabled;
+	bool				tx_irq_disabled;
+	u32 irqs_table[IRQ_NUM];
+	struct cpts			*cpts;
+	int				rx_ch_num, tx_ch_num;
+	int				speed;
+	int				usage_count;
+};
+
+struct cpsw_priv {
+	struct net_device		*ndev;
+	struct device			*dev;
+	u32				msg_enable;
+	u8				mac_addr[ETH_ALEN];
+	bool				rx_pause;
+	bool				tx_pause;
+	bool				mqprio_hw;
+	int				fifo_bw[CPSW_TC_NUM];
+	int				shp_cfg_speed;
+	u32 emac_port;
+	struct cpsw_common *cpsw;
+};
+
+struct cpsw_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+enum {
+	CPSW_STATS,
+	CPDMA_RX_STATS,
+	CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m)		CPSW_STATS,				\
+				sizeof(((struct cpsw_hw_stats *)0)->m), \
+				offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
+				sizeof(((struct cpdma_chan_stats *)0)->m), \
+				offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
+				sizeof(((struct cpdma_chan_stats *)0)->m), \
+				offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+	{ "Rx Fragments", CPSW_STAT(rxfragments) },
+	{ "Rx Octets", CPSW_STAT(rxoctets) },
+	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
+	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+	{ "Collisions", CPSW_STAT(txcollisionframes) },
+	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
+	{ "Tx Underrun", CPSW_STAT(txunderrun) },
+	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+	{ "Tx Octets", CPSW_STAT(txoctets) },
+	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+	{ "Net Octets", CPSW_STAT(netoctets) },
+	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+};
+
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+	{ "misqueued", CPDMA_RX_STAT(misqueued) },
+	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+	{ "requeue", CPDMA_RX_STAT(requeue) },
+	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)
+
+#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
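+/* Note: the macro argument is substituted into the member name as well, so
+ * this only works when the caller's variable is literally named napi_rx or
+ * napi_tx, matching the cpsw_common members.
+ */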
+#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
+#define for_each_slave(priv, func, arg...)				\
+	do {								\
+		struct cpsw_slave *slave;				\
+		struct cpsw_common *cpsw = (priv)->cpsw;		\
+		int n;							\
+		if (cpsw->data.dual_emac)				\
+			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
+		else							\
+			for (n = cpsw->data.slaves,			\
+					slave = cpsw->slaves;		\
+					n; n--)				\
+				(func)(slave++, ##arg);			\
+	} while (0)
+
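+/* The host is switch port 0, so slave N maps to switch port N + 1 */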
+static inline int cpsw_get_slave_port(u32 slave_num)
+{
+	return slave_num + 1;
+}
+
+static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	if (cpsw->data.dual_emac) {
+		struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
+		int slave_port = cpsw_get_slave_port(slave->slave_num);
+
+		cpsw_ale_add_mcast(cpsw->ale, addr,
+				   1 << slave_port | ALE_PORT_HOST,
+				   ALE_VLAN, slave->port_vlan, 0);
+		return;
+	}
+
+	cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
+}
+
+static void cpsw_set_promiscuous(struct net_device *ndev, bool enable)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpsw_ale *ale = cpsw->ale;
+	int i;
+
+	if (cpsw->data.dual_emac) {
+		bool flag = false;
+
+		/* Enabling promiscuous mode for one interface affects
+		 * both interfaces, as they share the same hardware
+		 * resource.
+		 */
+		for (i = 0; i < cpsw->data.slaves; i++)
+			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
+				flag = true;
+
+		if (!enable && flag) {
+			enable = true;
+			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+		}
+
+		if (enable) {
+			/* Enable Bypass */
+			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
+
+			dev_dbg(&ndev->dev, "promiscuity enabled\n");
+		} else {
+			/* Disable Bypass */
+			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
+			dev_dbg(&ndev->dev, "promiscuity disabled\n");
+		}
+	} else {
+		if (enable) {
+			unsigned long timeout = jiffies + HZ;
+
+			/* Disable learning for all ports (host is port 0 and
+			 * slaves are ports 1 and up)
+			 */
+			for (i = 0; i <= cpsw->data.slaves; i++) {
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NOLEARN, 1);
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NO_SA_UPDATE, 1);
+			}
+
+			/* Clear All Untouched entries */
+			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+			do {
+				cpu_relax();
+				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+					break;
+			} while (time_after(timeout, jiffies));
+			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+			/* Clear all mcast from ALE */
+			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
+
+			/* Flood All Unicast Packets to Host port */
+			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+			dev_dbg(&ndev->dev, "promiscuity enabled\n");
+		} else {
+			/* Don't Flood All Unicast Packets to Host port */
+			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+
+			/* Re-enable learning for all ports (host is port 0
+			 * and slaves are ports 1 and up)
+			 */
+			for (i = 0; i <= cpsw->data.slaves; i++) {
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NOLEARN, 0);
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NO_SA_UPDATE, 0);
+			}
+			dev_dbg(&ndev->dev, "promiscuity disabled\n");
+		}
+	}
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int vid;
+
+	if (cpsw->data.dual_emac)
+		vid = cpsw->slaves[priv->emac_port].port_vlan;
+	else
+		vid = cpsw->data.default_vlan;
+
+	if (ndev->flags & IFF_PROMISC) {
+		/* Enable promiscuous mode */
+		cpsw_set_promiscuous(ndev, true);
+		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
+		return;
+	}
+
+	/* Disable promiscuous mode */
+	cpsw_set_promiscuous(ndev, false);
+
+	/* Restore allmulti on vlans if necessary */
+	cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
+
+	/* Clear all mcast from ALE */
+	cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
+
+	if (!netdev_mc_empty(ndev)) {
+		struct netdev_hw_addr *ha;
+
+		/* program multicast address list into ALE register */
+		netdev_for_each_mc_addr(ha, ndev) {
+			cpsw_add_mcast(priv, ha->addr);
+		}
+	}
+}
+
+static void cpsw_intr_enable(struct cpsw_common *cpsw)
+{
+	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
+
+	cpdma_ctlr_int_ctrl(cpsw->dma, true);
+}
+
+static void cpsw_intr_disable(struct cpsw_common *cpsw)
+{
+	writel_relaxed(0, &cpsw->wr_regs->tx_en);
+	writel_relaxed(0, &cpsw->wr_regs->rx_en);
+
+	cpdma_ctlr_int_ctrl(cpsw->dma, false);
+}
+
+static void cpsw_tx_handler(void *token, int len, int status)
+{
+	struct netdev_queue	*txq;
+	struct sk_buff		*skb = token;
+	struct net_device	*ndev = skb->dev;
+	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
+
+	/* Check whether the queue is stopped due to stalled tx dma; if it
+	 * is, wake it, since completing this packet has freed a descriptor.
+	 */
+	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+	if (unlikely(netif_tx_queue_stopped(txq)))
+		netif_tx_wake_queue(txq);
+
+	cpts_tx_timestamp(cpsw->cpts, skb);
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += len;
+	dev_kfree_skb_any(skb);
+}
+
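+/* Parse the Rx VLAN encapsulation word that the hardware prepends to
+ * received frames when CPSW_RX_VLAN_ENCAP is set, populate the skb's
+ * hwaccel VLAN tag from it, and strip the in-band tag for VLAN-tagged
+ * frames.
+ */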
+static void cpsw_rx_vlan_encap(struct sk_buff *skb)
+{
+	struct cpsw_priv *priv = netdev_priv(skb->dev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
+	u16 vtag, vid, prio, pkt_type;
+
+	/* Remove VLAN header encapsulation word */
+	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
+
+	pkt_type = (rx_vlan_encap_hdr >>
+		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
+		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
+	/* Ignore unknown & Priority-tagged packets */
+	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
+	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
+		return;
+
+	vid = (rx_vlan_encap_hdr >>
+	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
+	       VLAN_VID_MASK;
+	/* Ignore vid 0 and pass packet as is */
+	if (!vid)
+		return;
+	/* Ignore default vlans in dual mac mode */
+	if (cpsw->data.dual_emac &&
+	    vid == cpsw->slaves[priv->emac_port].port_vlan)
+		return;
+
+	prio = (rx_vlan_encap_hdr >>
+		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
+		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
+
+	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+
+	/* strip vlan tag for VLAN-tagged packet */
+	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
+		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+		skb_pull(skb, VLAN_HLEN);
+	}
+}
+
+static void cpsw_rx_handler(void *token, int len, int status)
+{
+	struct cpdma_chan	*ch;
+	struct sk_buff		*skb = token;
+	struct sk_buff		*new_skb;
+	struct net_device	*ndev = skb->dev;
+	int			ret = 0, port;
+	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
+
+	if (cpsw->data.dual_emac) {
+		port = CPDMA_RX_SOURCE_PORT(status);
+		if (port) {
+			ndev = cpsw->slaves[--port].ndev;
+			skb->dev = ndev;
+		}
+	}
+
+	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+		/* In dual emac mode check for all interfaces */
+		if (cpsw->data.dual_emac && cpsw->usage_count &&
+		    (status >= 0)) {
+			/* The packet was received for an interface which is
+			 * already down while the other interface is still up
+			 * and running. Instead of freeing the skb, which
+			 * would shrink the number of rx descriptors in the
+			 * DMA engine, requeue it back to cpdma.
+			 */
+			new_skb = skb;
+			goto requeue;
+		}
+
+		/* the interface is going down, skbs are purged */
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
+	if (new_skb) {
+		skb_copy_queue_mapping(new_skb, skb);
+		skb_put(skb, len);
+		if (status & CPDMA_RX_VLAN_ENCAP)
+			cpsw_rx_vlan_encap(skb);
+		cpts_rx_timestamp(cpsw->cpts, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
+		netif_receive_skb(skb);
+		ndev->stats.rx_bytes += len;
+		ndev->stats.rx_packets++;
+		kmemleak_not_leak(new_skb);
+	} else {
+		ndev->stats.rx_dropped++;
+		new_skb = skb;
+	}
+
+requeue:
+	if (netif_dormant(ndev)) {
+		dev_kfree_skb_any(new_skb);
+		return;
+	}
+
+	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
+	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
+				skb_tailroom(new_skb), 0);
+	if (WARN_ON(ret < 0))
+		dev_kfree_skb_any(new_skb);
+}
+
+static void cpsw_split_res(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	u32 consumed_rate = 0, biggest_rate = 0;
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_vector *txv = cpsw->txv;
+	int i, ch_weight, rlim_ch_num = 0;
+	int budget, biggest_rate_ch = 0;
+	u32 ch_rate, max_rate;
+	int ch_budget = 0;
+
+	for (i = 0; i < cpsw->tx_ch_num; i++) {
+		ch_rate = cpdma_chan_get_rate(txv[i].ch);
+		if (!ch_rate)
+			continue;
+
+		rlim_ch_num++;
+		consumed_rate += ch_rate;
+	}
+
+	if (cpsw->tx_ch_num == rlim_ch_num) {
+		max_rate = consumed_rate;
+	} else if (!rlim_ch_num) {
+		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+		biggest_rate = 0;
+		max_rate = consumed_rate;
+	} else {
+		max_rate = cpsw->speed * 1000;
+
+		/* if max_rate is less than expected due to reduced link
+		 * speed, split proportionally according to the next
+		 * potential max speed; the 10x scaling below is applied
+		 * twice to cover up to two speed steps (10/100/1000 Mbps)
+		 */
+		if (max_rate < consumed_rate)
+			max_rate *= 10;
+
+		if (max_rate < consumed_rate)
+			max_rate *= 10;
+
+		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
+		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+			    (cpsw->tx_ch_num - rlim_ch_num);
+		biggest_rate = (max_rate - consumed_rate) /
+			       (cpsw->tx_ch_num - rlim_ch_num);
+	}
+
+	/* split tx weight/budget */
+	budget = CPSW_POLL_WEIGHT;
+	for (i = 0; i < cpsw->tx_ch_num; i++) {
+		ch_rate = cpdma_chan_get_rate(txv[i].ch);
+		if (ch_rate) {
+			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+			if (!txv[i].budget)
+				txv[i].budget++;
+			if (ch_rate > biggest_rate) {
+				biggest_rate_ch = i;
+				biggest_rate = ch_rate;
+			}
+
+			ch_weight = (ch_rate * 100) / max_rate;
+			if (!ch_weight)
+				ch_weight++;
+			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
+		} else {
+			txv[i].budget = ch_budget;
+			if (!biggest_rate_ch)
+				biggest_rate_ch = i;
+			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
+		}
+
+		budget -= txv[i].budget;
+	}
+
+	if (budget)
+		txv[biggest_rate_ch].budget += budget;
+
+	/* split rx budget */
+	budget = CPSW_POLL_WEIGHT;
+	ch_budget = budget / cpsw->rx_ch_num;
+	for (i = 0; i < cpsw->rx_ch_num; i++) {
+		cpsw->rxv[i].budget = ch_budget;
+		budget -= ch_budget;
+	}
+
+	if (budget)
+		cpsw->rxv[0].budget += budget;
+}
+
+static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
+{
+	struct cpsw_common *cpsw = dev_id;
+
+	writel(0, &cpsw->wr_regs->tx_en);
+	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
+
+	if (cpsw->quirk_irq) {
+		disable_irq_nosync(cpsw->irqs_table[1]);
+		cpsw->tx_irq_disabled = true;
+	}
+
+	napi_schedule(&cpsw->napi_tx);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+{
+	struct cpsw_common *cpsw = dev_id;
+
+	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+	writel(0, &cpsw->wr_regs->rx_en);
+
+	if (cpsw->quirk_irq) {
+		disable_irq_nosync(cpsw->irqs_table[0]);
+		cpsw->rx_irq_disabled = true;
+	}
+
+	napi_schedule(&cpsw->napi_rx);
+	return IRQ_HANDLED;
+}
+
+static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
+{
+	u32			ch_map;
+	int			num_tx, cur_budget, ch;
+	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
+	struct cpsw_vector	*txv;
+
+	/* process every unprocessed channel */
+	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+		if (!(ch_map & 0x80))
+			continue;
+
+		txv = &cpsw->txv[ch];
+		if (unlikely(txv->budget > budget - num_tx))
+			cur_budget = budget - num_tx;
+		else
+			cur_budget = txv->budget;
+
+		num_tx += cpdma_chan_process(txv->ch, cur_budget);
+		if (num_tx >= budget)
+			break;
+	}
+
+	if (num_tx < budget) {
+		napi_complete(napi_tx);
+		writel(0xff, &cpsw->wr_regs->tx_en);
+	}
+
+	return num_tx;
+}
+
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+	int num_tx;
+
+	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+	if (num_tx < budget) {
+		napi_complete(napi_tx);
+		writel(0xff, &cpsw->wr_regs->tx_en);
+		if (cpsw->tx_irq_disabled) {
+			cpsw->tx_irq_disabled = false;
+			enable_irq(cpsw->irqs_table[1]);
+		}
+	}
+
+	return num_tx;
+}
+
+static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
+{
+	u32			ch_map;
+	int			num_rx, cur_budget, ch;
+	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
+	struct cpsw_vector	*rxv;
+
+	/* process every unprocessed channel */
+	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
+		if (!(ch_map & 0x01))
+			continue;
+
+		rxv = &cpsw->rxv[ch];
+		if (unlikely(rxv->budget > budget - num_rx))
+			cur_budget = budget - num_rx;
+		else
+			cur_budget = rxv->budget;
+
+		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
+		if (num_rx >= budget)
+			break;
+	}
+
+	if (num_rx < budget) {
+		napi_complete_done(napi_rx, num_rx);
+		writel(0xff, &cpsw->wr_regs->rx_en);
+	}
+
+	return num_rx;
+}
+
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+	int num_rx;
+
+	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+	if (num_rx < budget) {
+		napi_complete_done(napi_rx, num_rx);
+		writel(0xff, &cpsw->wr_regs->rx_en);
+		if (cpsw->rx_irq_disabled) {
+			cpsw->rx_irq_disabled = false;
+			enable_irq(cpsw->irqs_table[0]);
+		}
+	}
+
+	return num_rx;
+}
+
+static inline void soft_reset(const char *module, void __iomem *reg)
+{
+	unsigned long timeout = jiffies + HZ;
+
+	writel_relaxed(1, reg);
+	do {
+		cpu_relax();
+	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
+
+	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
+}
+
+static void cpsw_set_slave_mac(struct cpsw_slave *slave,
+			       struct cpsw_priv *priv)
+{
+	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
+}
+
+static bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	u32 shift, mask, val;
+
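+	/* each slave has a 3-bit FIFO shaper enable field in the ptype
+	 * register, starting at CPSW_FIFO_SHAPE_EN_SHIFT
+	 */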
+	val = readl_relaxed(&cpsw->regs->ptype);
+
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+	mask = 7 << shift;
+	val = val & mask;
+
+	return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	u32 shift, mask, val;
+
+	val = readl_relaxed(&cpsw->regs->ptype);
+
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+	mask = (1 << --fifo) << shift;
+	val = on ? val | mask : val & ~mask;
+
+	writel_relaxed(val, &cpsw->regs->ptype);
+}
+
+static void _cpsw_adjust_link(struct cpsw_slave *slave,
+			      struct cpsw_priv *priv, bool *link)
+{
+	struct phy_device	*phy = slave->phy;
+	u32			mac_control = 0;
+	u32			slave_port;
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	if (!phy)
+		return;
+
+	slave_port = cpsw_get_slave_port(slave->slave_num);
+
+	if (phy->link) {
+		mac_control = cpsw->data.mac_control;
+
+		/* enable forwarding */
+		cpsw_ale_control_set(cpsw->ale, slave_port,
+				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+		if (phy->speed == 1000)
+			mac_control |= BIT(7);	/* GIGABITEN	*/
+		if (phy->duplex)
+			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/
+
+		/* set speed_in input in case RMII mode is used in 100Mbps */
+		if (phy->speed == 100)
+			mac_control |= BIT(15);
+		/* in band mode only works in 10Mbps RGMII mode */
+		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+			mac_control |= BIT(18); /* In Band mode */
+
+		if (priv->rx_pause)
+			mac_control |= BIT(3);
+
+		if (priv->tx_pause)
+			mac_control |= BIT(4);
+
+		*link = true;
+
+		if (priv->shp_cfg_speed &&
+		    priv->shp_cfg_speed != slave->phy->speed &&
+		    !cpsw_shp_is_off(priv))
+			dev_warn(priv->dev,
+				 "Speed was changed, CBS shaper speeds are changed!");
+	} else {
+		mac_control = 0;
+		/* disable forwarding */
+		cpsw_ale_control_set(cpsw->ale, slave_port,
+				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+	}
+
+	if (mac_control != slave->mac_control) {
+		phy_print_status(phy);
+		writel_relaxed(mac_control, &slave->sliver->mac_control);
+	}
+
+	slave->mac_control = mac_control;
+}
+
+static int cpsw_get_common_speed(struct cpsw_common *cpsw)
+{
+	int i, speed;
+
+	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
+		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
+			speed += cpsw->slaves[i].phy->speed;
+
+	return speed;
+}
+
+static int cpsw_need_resplit(struct cpsw_common *cpsw)
+{
+	int i, rlim_ch_num;
+	int speed, ch_rate;
+
+	/* re-split resources only in case speed was changed */
+	speed = cpsw_get_common_speed(cpsw);
+	if (speed == cpsw->speed || !speed)
+		return 0;
+
+	cpsw->speed = speed;
+
+	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
+		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
+		if (!ch_rate)
+			break;
+
+		rlim_ch_num++;
+	}
+
+	/* cases not dependent on speed */
+	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
+		return 0;
+
+	return 1;
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+	struct cpsw_priv	*priv = netdev_priv(ndev);
+	struct cpsw_common	*cpsw = priv->cpsw;
+	bool			link = false;
+
+	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
+
+	if (link) {
+		if (cpsw_need_resplit(cpsw))
+			cpsw_split_res(ndev);
+
+		netif_carrier_on(ndev);
+		if (netif_running(ndev))
+			netif_tx_wake_all_queues(ndev);
+	} else {
+		netif_carrier_off(ndev);
+		netif_tx_stop_all_queues(ndev);
+	}
+}
+
+static int cpsw_get_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	coal->rx_coalesce_usecs = cpsw->coal_intvl;
+	return 0;
+}
+
+static int cpsw_set_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	u32 int_ctrl;
+	u32 num_interrupts = 0;
+	u32 prescale = 0;
+	u32 addnl_dvdr = 1;
+	u32 coal_intvl = 0;
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	coal_intvl = coal->rx_coalesce_usecs;
+
+	int_ctrl =  readl(&cpsw->wr_regs->int_control);
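+	/* prescale: the number of bus clocks in the pacer's 4us pulse */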
+	prescale = cpsw->bus_freq_mhz * 4;
+
+	if (!coal->rx_coalesce_usecs) {
+		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
+		goto update_return;
+	}
+
+	if (coal_intvl < CPSW_CMINTMIN_INTVL)
+		coal_intvl = CPSW_CMINTMIN_INTVL;
+
+	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+		/* The interrupt pacer works with a 4us pulse; we can
+		 * throttle further by dilating that pulse.
+		 */
+		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+		if (addnl_dvdr > 1) {
+			prescale *= addnl_dvdr;
+			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+				coal_intvl = (CPSW_CMINTMAX_INTVL
+						* addnl_dvdr);
+		} else {
+			addnl_dvdr = 1;
+			coal_intvl = CPSW_CMINTMAX_INTVL;
+		}
+	}
+
+	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
+	writel(num_interrupts, &cpsw->wr_regs->tx_imax);
+
+	int_ctrl |= CPSW_INTPACEEN;
+	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+
+update_return:
+	writel(int_ctrl, &cpsw->wr_regs->int_control);
+
+	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+	cpsw->coal_intvl = coal_intvl;
+
+	return 0;
+}
+
+static int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return (CPSW_STATS_COMMON_LEN +
+		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+		       CPSW_STATS_CH_LEN);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+	int ch_stats_len;
+	int line;
+	int i;
+
+	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+	for (i = 0; i < ch_stats_len; i++) {
+		line = i % CPSW_STATS_CH_LEN;
+		snprintf(*p, ETH_GSTRING_LEN,
+			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
+			 (long)(i / CPSW_STATS_CH_LEN),
+			 cpsw_gstrings_ch_stats[line].stat_string);
+		*p += ETH_GSTRING_LEN;
+	}
+}
+
+static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
+			memcpy(p, cpsw_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
+		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
+		break;
+	}
+}
+
+static void cpsw_get_ethtool_stats(struct net_device *ndev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	u8 *p;
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpdma_chan_stats ch_stats;
+	int i, l, ch;
+
+	/* Collect CPSW hardware stats, then Davinci CPDMA stats for each
+	 * Rx and Tx channel
+	 */
+	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+		data[l] = readl(cpsw->hw_stats +
+				cpsw_gstrings_stats[l].stat_offset);
+
+	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
+		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+			p = (u8 *)&ch_stats +
+				cpsw_gstrings_ch_stats[i].stat_offset;
+			data[l] = *(u32 *)p;
+		}
+	}
+
+	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
+		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+			p = (u8 *)&ch_stats +
+				cpsw_gstrings_ch_stats[i].stat_offset;
+			data[l] = *(u32 *)p;
+		}
+	}
+}
+
+static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
+					struct sk_buff *skb,
+					struct cpdma_chan *txch)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	skb_tx_timestamp(skb);
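+	/* the last argument is the "directed" port: in dual EMAC mode the
+	 * frame is steered to this slave's switch port (emac_port + 1),
+	 * while 0 leaves the egress decision to the switch
+	 */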
+	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
+				 priv->emac_port + cpsw->data.dual_emac);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+		struct cpsw_priv *priv, struct cpsw_slave *slave,
+		u32 slave_port)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
+
+	if (cpsw->version == CPSW_VERSION_1)
+		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+	else
+		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
+			  port_mask, port_mask, 0);
+	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+			   port_mask, ALE_VLAN, slave->port_vlan, 0);
+	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+			   HOST_PORT_NUM, ALE_VLAN |
+			   ALE_SECURE, slave->port_vlan);
+	cpsw_ale_control_set(cpsw->ale, slave_port,
+			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+}
+
+static void soft_reset_slave(struct cpsw_slave *slave)
+{
+	char name[32];
+
+	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
+	soft_reset(name, &slave->sliver->soft_reset);
+}
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+	u32 slave_port;
+	struct phy_device *phy;
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	soft_reset_slave(slave);
+
+	/* setup priority mapping */
+	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+
+	switch (cpsw->version) {
+	case CPSW_VERSION_1:
+		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+		slave_write(slave,
+			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+		break;
+	case CPSW_VERSION_2:
+	case CPSW_VERSION_3:
+	case CPSW_VERSION_4:
+		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+		slave_write(slave,
+			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+		break;
+	}
+
+	/* setup max packet size, and mac address */
+	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+	cpsw_set_slave_mac(slave, priv);
+
+	slave->mac_control = 0;	/* no link yet */
+
+	slave_port = cpsw_get_slave_port(slave->slave_num);
+
+	if (cpsw->data.dual_emac)
+		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+	else
+		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
+
+	if (slave->data->phy_node) {
+		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+				 &cpsw_adjust_link, 0, slave->data->phy_if);
+		if (!phy) {
+			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
+				slave->data->phy_node,
+				slave->slave_num);
+			return;
+		}
+	} else {
+		phy = phy_connect(priv->ndev, slave->data->phy_id,
+				 &cpsw_adjust_link, slave->data->phy_if);
+		if (IS_ERR(phy)) {
+			dev_err(priv->dev,
+				"phy \"%s\" not found on slave %d, err %ld\n",
+				slave->data->phy_id, slave->slave_num,
+				PTR_ERR(phy));
+			return;
+		}
+	}
+
+	slave->phy = phy;
+
+	phy_attached_info(slave->phy);
+
+	phy_start(slave->phy);
+
+	/* Configure GMII_SEL register */
+	cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
+}
+
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	const int vlan = cpsw->data.default_vlan;
+	u32 reg;
+	int i;
+	int unreg_mcast_mask;
+
+	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+	       CPSW2_PORT_VLAN;
+
+	writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+	for (i = 0; i < cpsw->data.slaves; i++)
+		slave_write(cpsw->slaves + i, vlan, reg);
+
+	if (priv->ndev->flags & IFF_ALLMULTI)
+		unreg_mcast_mask = ALE_ALL_PORTS;
+	else
+		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+
+	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
+			  ALE_ALL_PORTS, ALE_ALL_PORTS,
+			  unreg_mcast_mask);
+}
+
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+	u32 fifo_mode;
+	u32 control_reg;
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	/* soft reset the controller and initialize ale */
+	soft_reset("cpsw", &cpsw->regs->soft_reset);
+	cpsw_ale_start(cpsw->ale);
+
+	/* switch to vlan aware mode */
+	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+			     CPSW_ALE_VLAN_AWARE);
+	control_reg = readl(&cpsw->regs->control);
+	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
+	writel(control_reg, &cpsw->regs->control);
+	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+		     CPSW_FIFO_NORMAL_MODE;
+	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
+
+	/* setup host port priority mapping */
+	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+		       &cpsw->host_port_regs->cpdma_tx_pri_map);
+	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+
+	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+	if (!cpsw->data.dual_emac) {
+		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+				   0, 0);
+		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
+	}
+}
+
+static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct sk_buff *skb;
+	int ch_buf_num;
+	int ch, i, ret;
+
+	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+		for (i = 0; i < ch_buf_num; i++) {
+			skb = __netdev_alloc_skb_ip_align(priv->ndev,
+							  cpsw->rx_packet_max,
+							  GFP_KERNEL);
+			if (!skb) {
+				cpsw_err(priv, ifup, "cannot allocate skb\n");
+				return -ENOMEM;
+			}
+
+			skb_set_queue_mapping(skb, ch);
+			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
+						skb->data, skb_tailroom(skb),
+						0);
+			if (ret < 0) {
+				cpsw_err(priv, ifup,
+					 "cannot submit skb to channel %d rx, error %d\n",
+					 ch, ret);
+				kfree_skb(skb);
+				return ret;
+			}
+			kmemleak_not_leak(skb);
+		}
+
+		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+			  ch, ch_buf_num);
+	}
+
+	return 0;
+}
+
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
+{
+	u32 slave_port;
+
+	slave_port = cpsw_get_slave_port(slave->slave_num);
+
+	if (!slave->phy)
+		return;
+	phy_stop(slave->phy);
+	phy_disconnect(slave->phy);
+	slave->phy = NULL;
+	cpsw_ale_control_set(cpsw->ale, slave_port,
+			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+	soft_reset_slave(slave);
+}
+
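+/* Map a traffic class to a FIFO: the last tc maps to FIFO 0, which cannot
+ * be shaped; the remaining tcs map to the shaped FIFOs in reverse order.
+ */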
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+	if (tc == num_tc - 1)
+		return 0;
+
+	return CPSW_FIFO_SHAPERS_NUM - tc;
+}
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	u32 val = 0, send_pct, shift;
+	struct cpsw_slave *slave;
+	int pct = 0, i;
+
+	if (bw > priv->shp_cfg_speed * 1000)
+		goto err;
+
+	/* shaping has to stay enabled for the highest fifos linearly,
+	 * and fifo bw can be no more than the interface allows
+	 */
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+	send_pct = slave_read(slave, SEND_PERCENT);
+	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+		if (!bw) {
+			if (i >= fifo || !priv->fifo_bw[i])
+				continue;
+
+			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+			continue;
+		}
+
+		if (!priv->fifo_bw[i] && i > fifo) {
+			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+			return -EINVAL;
+		}
+
+		shift = (i - 1) * 8;
+		if (i == fifo) {
+			send_pct &= ~(CPSW_PCT_MASK << shift);
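+			/* percentage of link rate: bw is in kbit/s and
+			 * shp_cfg_speed in Mbit/s, so pct = bw / (speed * 10)
+			 */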
+			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+			if (!val)
+				val = 1;
+
+			send_pct |= val << shift;
+			pct += val;
+			continue;
+		}
+
+		if (priv->fifo_bw[i])
+			pct += (send_pct >> shift) & CPSW_PCT_MASK;
+	}
+
+	if (pct >= 100)
+		goto err;
+
+	slave_write(slave, send_pct, SEND_PERCENT);
+	priv->fifo_bw[fifo] = bw;
+
+	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+	return 0;
+err:
+	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+	return -EINVAL;
+}
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	u32 tx_in_ctl_rg, val;
+	int ret;
+
+	ret = cpsw_set_fifo_bw(priv, fifo, bw);
+	if (ret)
+		return ret;
+
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+	if (!bw)
+		cpsw_fifo_shp_on(priv, fifo, bw);
+
+	val = slave_read(slave, tx_in_ctl_rg);
+	if (cpsw_shp_is_off(priv)) {
+		/* disable FIFOs rate limited queues */
+		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+		/* set type of FIFO queues to normal priority mode */
+		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+		/* set type of FIFO queues to be rate limited */
+		if (bw)
+			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+		else
+			priv->shp_cfg_speed = 0;
+	}
+
+	/* toggle a FIFO rate limited queue */
+	if (bw)
+		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+	else
+		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+	slave_write(slave, val, tx_in_ctl_rg);
+
+	/* FIFO transmit shape enable */
+	cpsw_fifo_shp_on(priv, fifo, bw);
+	return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+			struct tc_cbs_qopt_offload *qopt)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	int prev_speed = 0;
+	int tc, ret, fifo;
+	u32 bw = 0;
+
+	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+	/* enable channels in backward order, as the highest FIFOs must be
+	 * rate limited first and for compliance with CPDMA rate limited
+	 * channels, which are also used in backward order. FIFO0 cannot be
+	 * rate limited.
+	 */
+	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+	if (!fifo) {
+		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+		return -EINVAL;
+	}
+
+	/* do nothing, it's disabled anyway */
+	if (!qopt->enable && !priv->fifo_bw[fifo])
+		return 0;
+
+	/* shapers can be set if link speed is known */
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+	if (slave->phy && slave->phy->link) {
+		if (priv->shp_cfg_speed &&
+		    priv->shp_cfg_speed != slave->phy->speed)
+			prev_speed = priv->shp_cfg_speed;
+
+		priv->shp_cfg_speed = slave->phy->speed;
+	}
+
+	if (!priv->shp_cfg_speed) {
+		dev_err(priv->dev, "Link speed is not known");
+		return -EINVAL;
+	}
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	bw = qopt->enable ? qopt->idleslope : 0;
+	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+	if (ret) {
+		priv->shp_cfg_speed = prev_speed;
+		prev_speed = 0;
+	}
+
+	if (bw && prev_speed)
+		dev_warn(priv->dev,
+			 "Speed was changed, CBS shaper speeds are changed!");
+
+	pm_runtime_put_sync(cpsw->dev);
+	return ret;
+}
+
+static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+	int fifo, bw;
+
+	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+		bw = priv->fifo_bw[fifo];
+		if (!bw)
+			continue;
+
+		cpsw_set_fifo_rlimit(priv, fifo, bw);
+	}
+}
+
+static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	u32 tx_prio_map = 0;
+	int i, tc, fifo;
+	u32 tx_prio_rg;
+
+	if (!priv->mqprio_hw)
+		return;
+
+	for (i = 0; i < 8; i++) {
+		tc = netdev_get_prio_tc_map(priv->ndev, i);
+		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+		tx_prio_map |= fifo << (4 * i);
+	}
+
+	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+	slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+	/* restore MQPRIO offload */
+	for_each_slave(priv, cpsw_mqprio_resume, priv);
+
+	/* restore CBS offload */
+	for_each_slave(priv, cpsw_cbs_resume, priv);
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int ret;
+	u32 reg;
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	netif_carrier_off(ndev);
+
+	/* Notify the stack of the actual queue counts. */
+	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+	if (ret) {
+		dev_err(priv->dev, "cannot set real number of tx queues\n");
+		goto err_cleanup;
+	}
+
+	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+	if (ret) {
+		dev_err(priv->dev, "cannot set real number of rx queues\n");
+		goto err_cleanup;
+	}
+
+	reg = cpsw->version;
+
+	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
+		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
+		 CPSW_RTL_VERSION(reg));
+
+	/* Initialize host and slave ports */
+	if (!cpsw->usage_count)
+		cpsw_init_host_port(priv);
+	for_each_slave(priv, cpsw_slave_open, priv);
+
+	/* Add default VLAN */
+	if (!cpsw->data.dual_emac)
+		cpsw_add_default_vlan(priv);
+	else
+		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
+				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
+
+	/* initialize shared resources for every ndev */
+	if (!cpsw->usage_count) {
+		/* disable priority elevation */
+		writel_relaxed(0, &cpsw->regs->ptype);
+
+		/* enable statistics collection on all ports */
+		writel_relaxed(0x7, &cpsw->regs->stat_port_en);
+
+		/* Enable internal fifo flow control */
+		writel(0x7, &cpsw->regs->flow_control);
+
+		napi_enable(&cpsw->napi_rx);
+		napi_enable(&cpsw->napi_tx);
+
+		if (cpsw->tx_irq_disabled) {
+			cpsw->tx_irq_disabled = false;
+			enable_irq(cpsw->irqs_table[1]);
+		}
+
+		if (cpsw->rx_irq_disabled) {
+			cpsw->rx_irq_disabled = false;
+			enable_irq(cpsw->irqs_table[0]);
+		}
+
+		ret = cpsw_fill_rx_channels(priv);
+		if (ret < 0)
+			goto err_cleanup;
+
+		if (cpts_register(cpsw->cpts))
+			dev_err(priv->dev, "error registering cpts device\n");
+
+	}
+
+	cpsw_restore(priv);
+
+	/* Enable Interrupt pacing if configured */
+	if (cpsw->coal_intvl != 0) {
+		struct ethtool_coalesce coal;
+
+		coal.rx_coalesce_usecs = cpsw->coal_intvl;
+		cpsw_set_coalesce(ndev, &coal);
+	}
+
+	cpdma_ctlr_start(cpsw->dma);
+	cpsw_intr_enable(cpsw);
+	cpsw->usage_count++;
+
+	return 0;
+
+err_cleanup:
+	cpdma_ctlr_stop(cpsw->dma);
+	for_each_slave(priv, cpsw_slave_stop, cpsw);
+	pm_runtime_put_sync(cpsw->dev);
+	netif_carrier_off(priv->ndev);
+	return ret;
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
+	netif_tx_stop_all_queues(priv->ndev);
+	netif_carrier_off(priv->ndev);
+
+	if (cpsw->usage_count <= 1) {
+		napi_disable(&cpsw->napi_rx);
+		napi_disable(&cpsw->napi_tx);
+		cpts_unregister(cpsw->cpts);
+		cpsw_intr_disable(cpsw);
+		cpdma_ctlr_stop(cpsw->dma);
+		cpsw_ale_stop(cpsw->ale);
+	}
+	for_each_slave(priv, cpsw_slave_stop, cpsw);
+
+	if (cpsw_need_resplit(cpsw))
+		cpsw_split_res(ndev);
+
+	cpsw->usage_count--;
+	pm_runtime_put_sync(cpsw->dev);
+	return 0;
+}
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+				       struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpts *cpts = cpsw->cpts;
+	struct netdev_queue *txq;
+	struct cpdma_chan *txch;
+	int ret, q_idx;
+
+	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+		cpsw_err(priv, tx_err, "packet pad failed\n");
+		ndev->stats.tx_dropped++;
+		return NET_XMIT_DROP;
+	}
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+	    cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	q_idx = skb_get_queue_mapping(skb);
+	if (q_idx >= cpsw->tx_ch_num)
+		q_idx = q_idx % cpsw->tx_ch_num;
+
+	txch = cpsw->txv[q_idx].ch;
+	txq = netdev_get_tx_queue(ndev, q_idx);
+	ret = cpsw_tx_packet_submit(priv, skb, txch);
+	if (unlikely(ret != 0)) {
+		cpsw_err(priv, tx_err, "desc submit failed\n");
+		goto fail;
+	}
+
+	/* If there are no free tx descriptors left, tell the kernel
+	 * to stop sending us tx frames.
+	 */
+	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+		netif_tx_stop_queue(txq);
+
+		/* Barrier, so that stop_queue visible to other cpus */
+		smp_mb__after_atomic();
+
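+	/* re-check after the barrier: a tx completion may have freed
+	 * descriptors between the emptiness check and stopping the queue
+	 */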
+		if (cpdma_check_free_tx_desc(txch))
+			netif_tx_wake_queue(txq);
+	}
+
+	return NETDEV_TX_OK;
+fail:
+	ndev->stats.tx_dropped++;
+	netif_tx_stop_queue(txq);
+
+	/* Barrier, so that stop_queue visible to other cpus */
+	smp_mb__after_atomic();
+
+	if (cpdma_check_free_tx_desc(txch))
+		netif_tx_wake_queue(txq);
+
+	return NETDEV_TX_BUSY;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+{
+	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
+	u32 ts_en, seq_id;
+
+	if (!cpts_is_tx_enabled(cpsw->cpts) &&
+	    !cpts_is_rx_enabled(cpsw->cpts)) {
+		slave_write(slave, 0, CPSW1_TS_CTL);
+		return;
+	}
+
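+	/* sequence ID lives at offset 30 within the PTP message; LTYPE
+	 * matches ETH_P_1588
+	 */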
+	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+	if (cpts_is_tx_enabled(cpsw->cpts))
+		ts_en |= CPSW_V1_TS_TX_EN;
+
+	if (cpts_is_rx_enabled(cpsw->cpts))
+		ts_en |= CPSW_V1_TS_RX_EN;
+
+	slave_write(slave, ts_en, CPSW1_TS_CTL);
+	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+	struct cpsw_slave *slave;
+	struct cpsw_common *cpsw = priv->cpsw;
+	u32 ctrl, mtype;
+
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+
+	ctrl = slave_read(slave, CPSW2_CONTROL);
+	switch (cpsw->version) {
+	case CPSW_VERSION_2:
+		ctrl &= ~CTRL_V2_ALL_TS_MASK;
+
+		if (cpts_is_tx_enabled(cpsw->cpts))
+			ctrl |= CTRL_V2_TX_TS_BITS;
+
+		if (cpts_is_rx_enabled(cpsw->cpts))
+			ctrl |= CTRL_V2_RX_TS_BITS;
+		break;
+	case CPSW_VERSION_3:
+	default:
+		ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+		if (cpts_is_tx_enabled(cpsw->cpts))
+			ctrl |= CTRL_V3_TX_TS_BITS;
+
+		if (cpts_is_rx_enabled(cpsw->cpts))
+			ctrl |= CTRL_V3_RX_TS_BITS;
+		break;
+	}
+
+	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+	slave_write(slave, ctrl, CPSW2_CONTROL);
+	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+	struct cpsw_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config cfg;
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpts *cpts = cpsw->cpts;
+
+	if (cpsw->version != CPSW_VERSION_1 &&
+	    cpsw->version != CPSW_VERSION_2 &&
+	    cpsw->version != CPSW_VERSION_3)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (cfg.flags)
+		return -EINVAL;
+
+	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+		return -ERANGE;
+
+	switch (cfg.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		cpts_rx_enable(cpts, 0);
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_NTP_ALL:
+		return -ERANGE;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+
+	switch (cpsw->version) {
+	case CPSW_VERSION_1:
+		cpsw_hwtstamp_v1(cpsw);
+		break;
+	case CPSW_VERSION_2:
+	case CPSW_VERSION_3:
+		cpsw_hwtstamp_v2(priv);
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+	struct cpts *cpts = cpsw->cpts;
+	struct hwtstamp_config cfg;
+
+	if (cpsw->version != CPSW_VERSION_1 &&
+	    cpsw->version != CPSW_VERSION_2 &&
+	    cpsw->version != CPSW_VERSION_3)
+		return -EOPNOTSUPP;
+
+	cfg.flags = 0;
+	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
+		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
+			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	return -EOPNOTSUPP;
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_TI_CPTS */
+
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct cpsw_priv *priv = netdev_priv(dev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return cpsw_hwtstamp_set(dev, req);
+	case SIOCGHWTSTAMP:
+		return cpsw_hwtstamp_get(dev, req);
+	}
+
+	if (!cpsw->slaves[slave_no].phy)
+		return -EOPNOTSUPP;
+	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+}
+
+static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int ch;
+
+	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
+	ndev->stats.tx_errors++;
+	cpsw_intr_disable(cpsw);
+	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+		cpdma_chan_stop(cpsw->txv[ch].ch);
+		cpdma_chan_start(cpsw->txv[ch].ch);
+	}
+
+	cpsw_intr_enable(cpsw);
+	netif_trans_update(ndev);
+	netif_tx_wake_all_queues(ndev);
+}
+
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct sockaddr *addr = (struct sockaddr *)p;
+	struct cpsw_common *cpsw = priv->cpsw;
+	int flags = 0;
+	u16 vid = 0;
+	int ret;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	if (cpsw->data.dual_emac) {
+		vid = cpsw->slaves[priv->emac_port].port_vlan;
+		flags = ALE_VLAN;
+	}
+
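+	/* swap the ALE host-port unicast entry over to the new address */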
+	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+			   flags, vid);
+	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
+			   flags, vid);
+
+	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+	for_each_slave(priv, cpsw_set_slave_mac, priv);
+
+	pm_runtime_put(cpsw->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	cpsw_intr_disable(cpsw);
+	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+	cpsw_intr_enable(cpsw);
+}
+#endif
+
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+				unsigned short vid)
+{
+	int ret;
+	int unreg_mcast_mask = 0;
+	u32 port_mask;
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	if (cpsw->data.dual_emac) {
+		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+
+		if (priv->ndev->flags & IFF_ALLMULTI)
+			unreg_mcast_mask = port_mask;
+	} else {
+		port_mask = ALE_ALL_PORTS;
+
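+		/* Flood unregistered multicast to the host port only in
+		 * allmulti mode; otherwise restrict it to the slave ports.
+		 */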
+		if (priv->ndev->flags & IFF_ALLMULTI)
+			unreg_mcast_mask = ALE_ALL_PORTS;
+		else
+			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+	}
+
+	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
+				unreg_mcast_mask);
+	if (ret != 0)
+		return ret;
+
+	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+				 HOST_PORT_NUM, ALE_VLAN, vid);
+	if (ret != 0)
+		goto clean_vid;
+
+	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+				 port_mask, ALE_VLAN, vid, 0);
+	if (ret != 0)
+		goto clean_vlan_ucast;
+	return 0;
+
+clean_vlan_ucast:
+	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+			   HOST_PORT_NUM, ALE_VLAN, vid);
+clean_vid:
+	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+	return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+				    __be16 proto, u16 vid)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int ret;
+
+	if (vid == cpsw->data.default_vlan)
+		return 0;
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	if (cpsw->data.dual_emac) {
+		/* In dual EMAC, reserved VLAN id should not be used for
+		 * creating VLAN interfaces as this can break the dual
+		 * EMAC port separation
+		 */
+		int i;
+
+		for (i = 0; i < cpsw->data.slaves; i++) {
+			if (vid == cpsw->slaves[i].port_vlan) {
+				ret = -EINVAL;
+				goto err;
+			}
+		}
+	}
+
+	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+	ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
+	pm_runtime_put(cpsw->dev);
+	return ret;
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+				     __be16 proto, u16 vid)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int ret;
+
+	if (vid == cpsw->data.default_vlan)
+		return 0;
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	if (cpsw->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < cpsw->data.slaves; i++) {
+			if (vid == cpsw->slaves[i].port_vlan)
+				goto err;
+		}
+	}
+
+	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+				  HOST_PORT_NUM, ALE_VLAN, vid);
+	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+				  0, ALE_VLAN, vid);
+err:
+	pm_runtime_put(cpsw->dev);
+	return ret;
+}
+
+static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	u32 min_rate;
+	u32 ch_rate;
+	int i, ret;
+
+	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+	if (ch_rate == rate)
+		return 0;
+
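+	/* the ndo rate is given in Mbps while cpdma works in Kbps */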
+	ch_rate = rate * 1000;
+	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
+	if (ch_rate && ch_rate < min_rate) {
+		dev_err(priv->dev, "The channel rate cannot be less than %dKbps",
+			min_rate);
+		return -EINVAL;
+	}
+
+	if (rate > cpsw->speed) {
+		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
+		return -EINVAL;
+	}
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
+	pm_runtime_put(cpsw->dev);
+
+	if (ret)
+		return ret;
+
+	/* update rates for slaves tx queues */
+	for (i = 0; i < cpsw->data.slaves; i++) {
+		slave = &cpsw->slaves[i];
+		if (!slave->ndev)
+			continue;
+
+		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
+	}
+
+	cpsw_split_res(ndev);
+	return ret;
+}
+
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+	struct tc_mqprio_qopt_offload *mqprio = type_data;
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int fifo, num_tc, count, offset;
+	struct cpsw_slave *slave;
+	u32 tx_prio_map = 0;
+	int i, tc, ret;
+
+	num_tc = mqprio->qopt.num_tc;
+	if (num_tc > CPSW_TC_NUM)
+		return -EINVAL;
+
+	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(cpsw->dev);
+		return ret;
+	}
+
+	if (num_tc) {
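+		/* Build the per-port TX priority map: a 4-bit FIFO
+		 * number for each of the eight packet priorities.
+		 */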
+		for (i = 0; i < 8; i++) {
+			tc = mqprio->qopt.prio_tc_map[i];
+			fifo = cpsw_tc_to_fifo(tc, num_tc);
+			tx_prio_map |= fifo << (4 * i);
+		}
+
+		netdev_set_num_tc(ndev, num_tc);
+		for (i = 0; i < num_tc; i++) {
+			count = mqprio->qopt.count[i];
+			offset = mqprio->qopt.offset[i];
+			netdev_set_tc_queue(ndev, i, count, offset);
+		}
+	}
+
+	if (!mqprio->qopt.hw) {
+		/* restore default configuration */
+		netdev_reset_tc(ndev);
+		tx_prio_map = TX_PRIORITY_MAPPING;
+	}
+
+	priv->mqprio_hw = mqprio->qopt.hw;
+
+	offset = cpsw->version == CPSW_VERSION_1 ?
+		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+	slave_write(slave, tx_prio_map, offset);
+
+	pm_runtime_put_sync(cpsw->dev);
+
+	return 0;
+}
+
+static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+			     void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_QDISC_CBS:
+		return cpsw_set_cbs(ndev, type_data);
+
+	case TC_SETUP_QDISC_MQPRIO:
+		return cpsw_set_mqprio(ndev, type_data);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct net_device_ops cpsw_netdev_ops = {
+	.ndo_open		= cpsw_ndo_open,
+	.ndo_stop		= cpsw_ndo_stop,
+	.ndo_start_xmit		= cpsw_ndo_start_xmit,
+	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
+	.ndo_do_ioctl		= cpsw_ndo_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
+	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
+	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= cpsw_ndo_poll_controller,
+#endif
+	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
+	.ndo_setup_tc           = cpsw_ndo_setup_tc,
+};
+
+static int cpsw_get_regs_len(struct net_device *ndev)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+}
+
+static void cpsw_get_regs(struct net_device *ndev,
+			  struct ethtool_regs *regs, void *p)
+{
+	u32 *reg = p;
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	/* update CPSW IP version */
+	regs->version = cpsw->version;
+
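+	/* the register dump consists of the raw ALE table contents */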
+	cpsw_ale_dump(cpsw->ale, reg);
+}
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+			     struct ethtool_drvinfo *info)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct platform_device	*pdev = to_platform_device(cpsw->dev);
+
+	strlcpy(info->driver, "cpsw", sizeof(info->driver));
+	strlcpy(info->version, "1.0", sizeof(info->version));
+	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static u32 cpsw_get_msglevel(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	return priv->msg_enable;
+}
+
+static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	priv->msg_enable = value;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static int cpsw_get_ts_info(struct net_device *ndev,
+			    struct ethtool_ts_info *info)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = cpsw->cpts->phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+	return 0;
+}
+#else
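+/* without CPTS only software time stamping can be advertised */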
+static int cpsw_get_ts_info(struct net_device *ndev,
+			    struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE;
+	info->phc_index = -1;
+	info->tx_types = 0;
+	info->rx_filters = 0;
+	return 0;
+}
+#endif
+
+static int cpsw_get_link_ksettings(struct net_device *ndev,
+				   struct ethtool_link_ksettings *ecmd)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (!cpsw->slaves[slave_no].phy)
+		return -EOPNOTSUPP;
+
+	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
+	return 0;
+}
+
+static int cpsw_set_link_ksettings(struct net_device *ndev,
+				   const struct ethtool_link_ksettings *ecmd)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (cpsw->slaves[slave_no].phy)
+		return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
+						 ecmd);
+	else
+		return -EOPNOTSUPP;
+}
+
+static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	wol->supported = 0;
+	wol->wolopts = 0;
+
+	if (cpsw->slaves[slave_no].phy)
+		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
+}
+
+static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (cpsw->slaves[slave_no].phy)
+		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
+	else
+		return -EOPNOTSUPP;
+}
+
+static void cpsw_get_pauseparam(struct net_device *ndev,
+				struct ethtool_pauseparam *pause)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+
+	pause->autoneg = AUTONEG_DISABLE;
+	pause->rx_pause = priv->rx_pause;
+	pause->tx_pause = priv->tx_pause;
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+			       struct ethtool_pauseparam *pause)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	bool link;
+
+	priv->rx_pause = pause->rx_pause;
+	priv->tx_pause = pause->tx_pause;
+
+	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
+	return 0;
+}
+
+static int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int ret;
+
+	ret = pm_runtime_get_sync(cpsw->dev);
+	if (ret < 0) {
+		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+		pm_runtime_put_noidle(cpsw->dev);
+	}
+
+	return ret;
+}
+
+static void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = pm_runtime_put(priv->cpsw->dev);
+	if (ret < 0)
+		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
+static void cpsw_get_channels(struct net_device *ndev,
+			      struct ethtool_channels *ch)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+	ch->max_combined = 0;
+	ch->max_other = 0;
+	ch->other_count = 0;
+	ch->rx_count = cpsw->rx_ch_num;
+	ch->tx_count = cpsw->tx_ch_num;
+	ch->combined_count = 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+				  struct ethtool_channels *ch)
+{
+	if (cpsw->quirk_irq) {
+		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
+		return -EOPNOTSUPP;
+	}
+
+	if (ch->combined_count)
+		return -EINVAL;
+
+	/* verify we have at least one channel in each direction */
+	if (!ch->rx_count || !ch->tx_count)
+		return -EINVAL;
+
+	if (ch->rx_count > cpsw->data.channels ||
+	    ch->tx_count > cpsw->data.channels)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
+{
+	struct cpsw_common *cpsw = priv->cpsw;
+	void (*handler)(void *, int, int);
+	struct netdev_queue *queue;
+	struct cpsw_vector *vec;
+	int ret, *ch, vch;
+
+	if (rx) {
+		ch = &cpsw->rx_ch_num;
+		vec = cpsw->rxv;
+		handler = cpsw_rx_handler;
+	} else {
+		ch = &cpsw->tx_ch_num;
+		vec = cpsw->txv;
+		handler = cpsw_tx_handler;
+	}
+
+	while (*ch < ch_num) {
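+		/* rx channels are numbered from 0 upwards, tx channels
+		 * are taken from the top cpdma channel (7) downwards
+		 */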
+		vch = rx ? *ch : 7 - *ch;
+		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
+		queue = netdev_get_tx_queue(priv->ndev, *ch);
+		queue->tx_maxrate = 0;
+
+		if (IS_ERR(vec[*ch].ch))
+			return PTR_ERR(vec[*ch].ch);
+
+		if (!vec[*ch].ch)
+			return -EINVAL;
+
+		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+			  (rx ? "rx" : "tx"));
+		(*ch)++;
+	}
+
+	while (*ch > ch_num) {
+		(*ch)--;
+
+		ret = cpdma_chan_destroy(vec[*ch].ch);
+		if (ret)
+			return ret;
+
+		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+			  (rx ? "rx" : "tx"));
+	}
+
+	return 0;
+}
+
+static int cpsw_update_channels(struct cpsw_priv *priv,
+				struct ethtool_channels *ch)
+{
+	int ret;
+
+	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
+	if (ret)
+		return ret;
+
+	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void cpsw_suspend_data_pass(struct net_device *ndev)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpsw_slave *slave;
+	int i;
+
+	/* Disable NAPI scheduling */
+	cpsw_intr_disable(cpsw);
+
+	/* Stop all transmit queues for every network device.
+	 * Disable re-using rx descriptors with dormant_on.
+	 */
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		netif_tx_stop_all_queues(slave->ndev);
+		netif_dormant_on(slave->ndev);
+	}
+
+	/* Handle rest of tx packets and stop cpdma channels */
+	cpdma_ctlr_stop(cpsw->dma);
+}
+
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	int i, ret;
+
+	/* Allow rx packets handling */
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+		if (slave->ndev && netif_running(slave->ndev))
+			netif_dormant_off(slave->ndev);
+
+	/* After this point reception is restarted */
+	if (cpsw->usage_count) {
+		ret = cpsw_fill_rx_channels(priv);
+		if (ret)
+			return ret;
+
+		cpdma_ctlr_start(cpsw->dma);
+		cpsw_intr_enable(cpsw);
+	}
+
+	/* Resume transmit for every affected interface */
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+		if (slave->ndev && netif_running(slave->ndev))
+			netif_tx_start_all_queues(slave->ndev);
+
+	return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+			     struct ethtool_channels *chs)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	int i, ret;
+
+	ret = cpsw_check_ch_settings(cpsw, chs);
+	if (ret < 0)
+		return ret;
+
+	cpsw_suspend_data_pass(ndev);
+	ret = cpsw_update_channels(priv, chs);
+	if (ret)
+		goto err;
+
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		/* Inform stack about new count of queues */
+		ret = netif_set_real_num_tx_queues(slave->ndev,
+						   cpsw->tx_ch_num);
+		if (ret) {
+			dev_err(priv->dev, "cannot set real number of tx queues\n");
+			goto err;
+		}
+
+		ret = netif_set_real_num_rx_queues(slave->ndev,
+						   cpsw->rx_ch_num);
+		if (ret) {
+			dev_err(priv->dev, "cannot set real number of rx queues\n");
+			goto err;
+		}
+	}
+
+	if (cpsw->usage_count)
+		cpsw_split_res(ndev);
+
+	ret = cpsw_resume_data_pass(ndev);
+	if (!ret)
+		return 0;
+err:
+	dev_err(priv->dev, "cannot update channels number, closing device\n");
+	dev_close(ndev);
+	return ret;
+}
+
+static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (cpsw->slaves[slave_no].phy)
+		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
+	else
+		return -EOPNOTSUPP;
+}
+
+static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (cpsw->slaves[slave_no].phy)
+		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
+	else
+		return -EOPNOTSUPP;
+}
+
+static int cpsw_nway_reset(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int slave_no = cpsw_slave_index(cpsw, priv);
+
+	if (cpsw->slaves[slave_no].phy)
+		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
+	else
+		return -EOPNOTSUPP;
+}
+
+static void cpsw_get_ringparam(struct net_device *ndev,
+			       struct ethtool_ringparam *ering)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	/* tx ring resize is not supported */
+	ering->tx_max_pending = 0;
+	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
+	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+static int cpsw_set_ringparam(struct net_device *ndev,
+			      struct ethtool_ringparam *ering)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	int ret;
+
+	/* ignore ering->tx_pending - only rx_pending adjustment is supported */
+
+	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+	    ering->rx_pending < CPSW_MAX_QUEUES ||
+	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
+		return -EINVAL;
+
+	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+		return 0;
+
+	cpsw_suspend_data_pass(ndev);
+
+	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+
+	if (cpsw->usage_count)
+		cpdma_chan_split_pool(cpsw->dma);
+
+	ret = cpsw_resume_data_pass(ndev);
+	if (!ret)
+		return 0;
+
+	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
+	dev_close(ndev);
+	return ret;
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+	.get_drvinfo	= cpsw_get_drvinfo,
+	.get_msglevel	= cpsw_get_msglevel,
+	.set_msglevel	= cpsw_set_msglevel,
+	.get_link	= ethtool_op_get_link,
+	.get_ts_info	= cpsw_get_ts_info,
+	.get_coalesce	= cpsw_get_coalesce,
+	.set_coalesce	= cpsw_set_coalesce,
+	.get_sset_count		= cpsw_get_sset_count,
+	.get_strings		= cpsw_get_strings,
+	.get_ethtool_stats	= cpsw_get_ethtool_stats,
+	.get_pauseparam		= cpsw_get_pauseparam,
+	.set_pauseparam		= cpsw_set_pauseparam,
+	.get_wol	= cpsw_get_wol,
+	.set_wol	= cpsw_set_wol,
+	.get_regs_len	= cpsw_get_regs_len,
+	.get_regs	= cpsw_get_regs,
+	.begin		= cpsw_ethtool_op_begin,
+	.complete	= cpsw_ethtool_op_complete,
+	.get_channels	= cpsw_get_channels,
+	.set_channels	= cpsw_set_channels,
+	.get_link_ksettings	= cpsw_get_link_ksettings,
+	.set_link_ksettings	= cpsw_set_link_ksettings,
+	.get_eee	= cpsw_get_eee,
+	.set_eee	= cpsw_set_eee,
+	.nway_reset	= cpsw_nway_reset,
+	.get_ringparam = cpsw_get_ringparam,
+	.set_ringparam = cpsw_set_ringparam,
+};
+
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
+			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
+{
+	void __iomem		*regs = cpsw->regs;
+	int			slave_num = slave->slave_num;
+	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;
+
+	slave->data	= data;
+	slave->regs	= regs + slave_reg_ofs;
+	slave->sliver	= regs + sliver_reg_ofs;
+	slave->port_vlan = data->dual_emac_res_vlan;
+}
+
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+			 struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *slave_node;
+	int i = 0, ret;
+	u32 prop;
+
+	if (!node)
+		return -EINVAL;
+
+	if (of_property_read_u32(node, "slaves", &prop)) {
+		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
+		return -EINVAL;
+	}
+	data->slaves = prop;
+
+	if (of_property_read_u32(node, "active_slave", &prop)) {
+		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
+		return -EINVAL;
+	}
+	data->active_slave = prop;
+
+	data->slave_data = devm_kcalloc(&pdev->dev,
+					data->slaves,
+					sizeof(struct cpsw_slave_data),
+					GFP_KERNEL);
+	if (!data->slave_data)
+		return -ENOMEM;
+
+	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
+		return -EINVAL;
+	}
+	data->channels = prop;
+
+	if (of_property_read_u32(node, "ale_entries", &prop)) {
+		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
+		return -EINVAL;
+	}
+	data->ale_entries = prop;
+
+	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
+		return -EINVAL;
+	}
+	data->bd_ram_size = prop;
+
+	if (of_property_read_u32(node, "mac_control", &prop)) {
+		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
+		return -EINVAL;
+	}
+	data->mac_control = prop;
+
+	if (of_property_read_bool(node, "dual_emac"))
+		data->dual_emac = 1;
+
+	/* Populate all the child nodes here. This is not forced, since
+	 * some platforms may not have any child nodes at all.
+	 */
+	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	if (ret)
+		dev_warn(&pdev->dev, "Doesn't have any child node\n");
+
+	for_each_available_child_of_node(node, slave_node) {
+		struct cpsw_slave_data *slave_data = data->slave_data + i;
+		const void *mac_addr = NULL;
+		int lenp;
+		const __be32 *parp;
+
+		/* skip any child node that is not a slave node */
+		if (strcmp(slave_node->name, "slave"))
+			continue;
+
+		slave_data->phy_node = of_parse_phandle(slave_node,
+							"phy-handle", 0);
+		parp = of_get_property(slave_node, "phy_id", &lenp);
+		if (slave_data->phy_node) {
+			dev_dbg(&pdev->dev,
+				"slave[%d] using phy-handle=\"%pOF\"\n",
+				i, slave_data->phy_node);
+		} else if (of_phy_is_fixed_link(slave_node)) {
+			/* In the case of a fixed PHY, the DT node associated
+			 * to the PHY is the Ethernet MAC DT node.
+			 */
+			ret = of_phy_register_fixed_link(slave_node);
+			if (ret) {
+				if (ret != -EPROBE_DEFER)
+					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
+				return ret;
+			}
+			slave_data->phy_node = of_node_get(slave_node);
+		} else if (parp) {
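+			/* legacy binding: phy_id is a <mdio-phandle, addr>
+			 * pair used to build the PHY bus id string
+			 */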
+			u32 phyid;
+			struct device_node *mdio_node;
+			struct platform_device *mdio;
+
+			if (lenp != (sizeof(__be32) * 2)) {
+				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
+				goto no_phy_slave;
+			}
+			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+			phyid = be32_to_cpup(parp+1);
+			mdio = of_find_device_by_node(mdio_node);
+			of_node_put(mdio_node);
+			if (!mdio) {
+				dev_err(&pdev->dev, "Missing mdio platform device\n");
+				return -EINVAL;
+			}
+			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+				 PHY_ID_FMT, mdio->name, phyid);
+			put_device(&mdio->dev);
+		} else {
+			dev_err(&pdev->dev,
+				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+				i);
+			goto no_phy_slave;
+		}
+		slave_data->phy_if = of_get_phy_mode(slave_node);
+		if (slave_data->phy_if < 0) {
+			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+				i);
+			return slave_data->phy_if;
+		}
+
+no_phy_slave:
+		mac_addr = of_get_mac_address(slave_node);
+		if (mac_addr) {
+			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+		} else {
+			ret = ti_cm_get_macid(&pdev->dev, i,
+					      slave_data->mac_addr);
+			if (ret)
+				return ret;
+		}
+		if (data->dual_emac) {
+			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
+						 &prop)) {
+				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
+				slave_data->dual_emac_res_vlan = i+1;
+				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+					slave_data->dual_emac_res_vlan, i);
+			} else {
+				slave_data->dual_emac_res_vlan = prop;
+			}
+		}
+
+		i++;
+		if (i == data->slaves)
+			break;
+	}
+
+	return 0;
+}
+
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpsw_platform_data *data = &cpsw->data;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *slave_node;
+	int i = 0;
+
+	for_each_available_child_of_node(node, slave_node) {
+		struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+		if (strcmp(slave_node->name, "slave"))
+			continue;
+
+		if (of_phy_is_fixed_link(slave_node))
+			of_phy_deregister_fixed_link(slave_node);
+
+		of_node_put(slave_data->phy_node);
+
+		i++;
+		if (i == data->slaves)
+			break;
+	}
+
+	of_platform_depopulate(&pdev->dev);
+}
+
+static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
+{
+	struct cpsw_common		*cpsw = priv->cpsw;
+	struct cpsw_platform_data	*data = &cpsw->data;
+	struct net_device		*ndev;
+	struct cpsw_priv		*priv_sl2;
+	int ret = 0;
+
+	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
+	if (!ndev) {
+		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
+		return -ENOMEM;
+	}
+
+	priv_sl2 = netdev_priv(ndev);
+	priv_sl2->cpsw = cpsw;
+	priv_sl2->ndev = ndev;
+	priv_sl2->dev  = &ndev->dev;
+	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+
+	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+			ETH_ALEN);
+		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
+			 priv_sl2->mac_addr);
+	} else {
+		eth_random_addr(priv_sl2->mac_addr);
+		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
+			 priv_sl2->mac_addr);
+	}
+	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+
+	priv_sl2->emac_port = 1;
+	cpsw->slaves[1].ndev = ndev;
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+
+	ndev->netdev_ops = &cpsw_netdev_ops;
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
+
+	/* register the network device */
+	SET_NETDEV_DEV(ndev, cpsw->dev);
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(cpsw->dev, "cpsw: error registering net device\n");
+		free_netdev(ndev);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id cpsw_of_mtable[] = {
+	{ .compatible = "ti,cpsw"},
+	{ .compatible = "ti,am335x-cpsw"},
+	{ .compatible = "ti,am4372-cpsw"},
+	{ .compatible = "ti,dra7-cpsw"},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+	{ .family = "AM33xx", .revision = "ES1.0"},
+	{ /* sentinel */ }
+};
+
+static int cpsw_probe(struct platform_device *pdev)
+{
+	struct clk			*clk;
+	struct cpsw_platform_data	*data;
+	struct net_device		*ndev;
+	struct cpsw_priv		*priv;
+	struct cpdma_params		dma_params;
+	struct cpsw_ale_params		ale_params;
+	void __iomem			*ss_regs;
+	void __iomem			*cpts_regs;
+	struct resource			*res, *ss_res;
+	struct gpio_descs		*mode;
+	u32 slave_offset, sliver_offset, slave_size;
+	const struct soc_device_attribute *soc;
+	struct cpsw_common		*cpsw;
+	int ret = 0, i, ch;
+	int irq;
+
+	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+	if (!cpsw)
+		return -ENOMEM;
+
+	cpsw->dev = &pdev->dev;
+
+	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
+	if (!ndev) {
+		dev_err(&pdev->dev, "error allocating net_device\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	priv->cpsw = cpsw;
+	priv->ndev = ndev;
+	priv->dev  = &ndev->dev;
+	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+	cpsw->rx_packet_max = max(rx_packet_max, 128);
+
+	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
+	if (IS_ERR(mode)) {
+		ret = PTR_ERR(mode);
+		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+		goto clean_ndev_ret;
+	}
+
+	/* Enable runtime PM here; the child devices populated from the
+	 * DT may require it.
+	 */
+	pm_runtime_enable(&pdev->dev);
+
+	/* Select default pin state */
+	pinctrl_pm_select_default_state(&pdev->dev);
+
+	/* Need to enable clocks with runtime PM api to access module
+	 * registers
+	 */
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&pdev->dev);
+		goto clean_runtime_disable_ret;
+	}
+
+	ret = cpsw_probe_dt(&cpsw->data, pdev);
+	if (ret)
+		goto clean_dt_ret;
+
+	data = &cpsw->data;
+	cpsw->rx_ch_num = 1;
+	cpsw->tx_ch_num = 1;
+
+	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
+		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
+		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
+	} else {
+		eth_random_addr(priv->mac_addr);
+		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
+	}
+
+	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+	cpsw->slaves = devm_kcalloc(&pdev->dev,
+				    data->slaves, sizeof(struct cpsw_slave),
+				    GFP_KERNEL);
+	if (!cpsw->slaves) {
+		ret = -ENOMEM;
+		goto clean_dt_ret;
+	}
+	for (i = 0; i < data->slaves; i++)
+		cpsw->slaves[i].slave_num = i;
+
+	cpsw->slaves[0].ndev = ndev;
+	priv->emac_port = 0;
+
+	clk = devm_clk_get(&pdev->dev, "fck");
+	if (IS_ERR(clk)) {
+		dev_err(priv->dev, "fck is not found\n");
+		ret = -ENODEV;
+		goto clean_dt_ret;
+	}
+	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+	if (IS_ERR(ss_regs)) {
+		ret = PTR_ERR(ss_regs);
+		goto clean_dt_ret;
+	}
+	cpsw->regs = ss_regs;
+
+	cpsw->version = readl(&cpsw->regs->id_ver);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(cpsw->wr_regs)) {
+		ret = PTR_ERR(cpsw->wr_regs);
+		goto clean_dt_ret;
+	}
+
+	memset(&dma_params, 0, sizeof(dma_params));
+	memset(&ale_params, 0, sizeof(ale_params));
+
+	switch (cpsw->version) {
+	case CPSW_VERSION_1:
+		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+		cpts_regs		= ss_regs + CPSW1_CPTS_OFFSET;
+		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
+		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
+		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
+		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
+		slave_offset         = CPSW1_SLAVE_OFFSET;
+		slave_size           = CPSW1_SLAVE_SIZE;
+		sliver_offset        = CPSW1_SLIVER_OFFSET;
+		dma_params.desc_mem_phys = 0;
+		break;
+	case CPSW_VERSION_2:
+	case CPSW_VERSION_3:
+	case CPSW_VERSION_4:
+		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+		cpts_regs		= ss_regs + CPSW2_CPTS_OFFSET;
+		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
+		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
+		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
+		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
+		slave_offset         = CPSW2_SLAVE_OFFSET;
+		slave_size           = CPSW2_SLAVE_SIZE;
+		sliver_offset        = CPSW2_SLIVER_OFFSET;
+		dma_params.desc_mem_phys =
+			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
+		break;
+	default:
+		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
+		ret = -ENODEV;
+		goto clean_dt_ret;
+	}
+	for (i = 0; i < cpsw->data.slaves; i++) {
+		struct cpsw_slave *slave = &cpsw->slaves[i];
+
+		cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
+		slave_offset  += slave_size;
+		sliver_offset += SLIVER_SIZE;
+	}
+
+	dma_params.dev		= &pdev->dev;
+	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
+	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
+	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
+	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
+	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
+
+	dma_params.num_chan		= data->channels;
+	dma_params.has_soft_reset	= true;
+	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
+	dma_params.desc_mem_size	= data->bd_ram_size;
+	dma_params.desc_align		= 16;
+	dma_params.has_ext_regs		= true;
+	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
+	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
+	dma_params.descs_pool_size	= descs_pool_size;
+
+	cpsw->dma = cpdma_ctlr_create(&dma_params);
+	if (!cpsw->dma) {
+		dev_err(priv->dev, "error initializing dma\n");
+		ret = -ENOMEM;
+		goto clean_dt_ret;
+	}
+
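+	/* AM33xx ES1.0 needs the irq quirk handling, which limits the
+	 * driver to a single rx and a single tx channel
+	 */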
+	soc = soc_device_match(cpsw_soc_devices);
+	if (soc)
+		cpsw->quirk_irq = 1;
+
+	ch = cpsw->quirk_irq ? 0 : 7;
+	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
+	if (IS_ERR(cpsw->txv[0].ch)) {
+		dev_err(priv->dev, "error initializing tx dma channel\n");
+		ret = PTR_ERR(cpsw->txv[0].ch);
+		goto clean_dma_ret;
+	}
+
+	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+	if (IS_ERR(cpsw->rxv[0].ch)) {
+		dev_err(priv->dev, "error initializing rx dma channel\n");
+		ret = PTR_ERR(cpsw->rxv[0].ch);
+		goto clean_dma_ret;
+	}
+
+	ale_params.dev			= &pdev->dev;
+	ale_params.ale_ageout		= ale_ageout;
+	ale_params.ale_entries		= data->ale_entries;
+	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
+
+	cpsw->ale = cpsw_ale_create(&ale_params);
+	if (!cpsw->ale) {
+		dev_err(priv->dev, "error initializing ale engine\n");
+		ret = -ENODEV;
+		goto clean_dma_ret;
+	}
+
+	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+	if (IS_ERR(cpsw->cpts)) {
+		ret = PTR_ERR(cpsw->cpts);
+		goto clean_dma_ret;
+	}
+
+	ndev->irq = platform_get_irq(pdev, 1);
+	if (ndev->irq < 0) {
+		dev_err(priv->dev, "error getting irq resource\n");
+		ret = ndev->irq;
+		goto clean_dma_ret;
+	}
+
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+
+	ndev->netdev_ops = &cpsw_netdev_ops;
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
+	netif_napi_add(ndev, &cpsw->napi_rx,
+		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
+		       CPSW_POLL_WEIGHT);
+	netif_tx_napi_add(ndev, &cpsw->napi_tx,
+			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
+			  CPSW_POLL_WEIGHT);
+	cpsw_split_res(ndev);
+
+	/* register the network device */
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(priv->dev, "error registering net device\n");
+		ret = -ENODEV;
+		goto clean_dma_ret;
+	}
+
+	if (cpsw->data.dual_emac) {
+		ret = cpsw_probe_dual_emac(priv);
+		if (ret) {
+			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+			goto clean_unregister_netdev_ret;
+		}
+	}
+
+	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+	 * MISC IRQs which are always kept disabled with this driver so
+	 * we will not request them.
+	 *
+	 * If anyone wants to implement support for those, make sure to
+	 * first request and append them to irqs_table array.
+	 */
+
+	/* RX IRQ */
+	irq = platform_get_irq(pdev, 1);
+	if (irq < 0) {
+		ret = irq;
+		goto clean_unregister_netdev_ret;
+	}
+
+	cpsw->irqs_table[0] = irq;
+	ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
+			       0, dev_name(&pdev->dev), cpsw);
+	if (ret < 0) {
+		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+		goto clean_unregister_netdev_ret;
+	}
+
+	/* TX IRQ */
+	irq = platform_get_irq(pdev, 2);
+	if (irq < 0) {
+		ret = irq;
+		goto clean_unregister_netdev_ret;
+	}
+
+	cpsw->irqs_table[1] = irq;
+	ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
+			       0, dev_name(&pdev->dev), cpsw);
+	if (ret < 0) {
+		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+		goto clean_unregister_netdev_ret;
+	}
+
+	cpsw_notice(priv, probe,
+		    "initialized device (regs %pa, irq %d, pool size %d)\n",
+		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);
+
+	pm_runtime_put(&pdev->dev);
+
+	return 0;
+
+clean_unregister_netdev_ret:
+	unregister_netdev(ndev);
+clean_dma_ret:
+	cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+	cpsw_remove_dt(pdev);
+	pm_runtime_put_sync(&pdev->dev);
+clean_runtime_disable_ret:
+	pm_runtime_disable(&pdev->dev);
+clean_ndev_ret:
+	free_netdev(priv->ndev);
+	return ret;
+}
+
+static int cpsw_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&pdev->dev);
+		return ret;
+	}
+
+	if (cpsw->data.dual_emac)
+		unregister_netdev(cpsw->slaves[1].ndev);
+	unregister_netdev(ndev);
+
+	cpts_release(cpsw->cpts);
+	cpdma_ctlr_destroy(cpsw->dma);
+	cpsw_remove_dt(pdev);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	if (cpsw->data.dual_emac)
+		free_netdev(cpsw->slaves[1].ndev);
+	free_netdev(ndev);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cpsw_suspend(struct device *dev)
+{
+	struct platform_device	*pdev = to_platform_device(dev);
+	struct net_device	*ndev = platform_get_drvdata(pdev);
+	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
+
+	if (cpsw->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < cpsw->data.slaves; i++) {
+			if (netif_running(cpsw->slaves[i].ndev))
+				cpsw_ndo_stop(cpsw->slaves[i].ndev);
+		}
+	} else {
+		if (netif_running(ndev))
+			cpsw_ndo_stop(ndev);
+	}
+
+	/* Select sleep pin state */
+	pinctrl_pm_select_sleep_state(dev);
+
+	return 0;
+}
+
+static int cpsw_resume(struct device *dev)
+{
+	struct platform_device	*pdev = to_platform_device(dev);
+	struct net_device	*ndev = platform_get_drvdata(pdev);
+	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
+
+	/* Select default pin state */
+	pinctrl_pm_select_default_state(dev);
+
+	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+	rtnl_lock();
+	if (cpsw->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < cpsw->data.slaves; i++) {
+			if (netif_running(cpsw->slaves[i].ndev))
+				cpsw_ndo_open(cpsw->slaves[i].ndev);
+		}
+	} else {
+		if (netif_running(ndev))
+			cpsw_ndo_open(ndev);
+	}
+	rtnl_unlock();
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
+
+static struct platform_driver cpsw_driver = {
+	.driver = {
+		.name	 = "cpsw",
+		.pm	 = &cpsw_pm_ops,
+		.of_match_table = cpsw_of_mtable,
+	},
+	.probe = cpsw_probe,
+	.remove = cpsw_remove,
+};
+
+module_platform_driver(cpsw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_DESCRIPTION("TI CPSW Ethernet driver");
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
new file mode 100644
index 0000000..cf111db
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -0,0 +1,27 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __CPSW_H__
+#define __CPSW_H__
+
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
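+/* pack a MAC address into the two 32-bit halves expected by the
+ * hardware address registers: bytes 0-3 in the high word, bytes 4-5
+ * in the low word
+ */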
+#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
+			 ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
+
+#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
new file mode 100644
index 0000000..5766225
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -0,0 +1,897 @@
+/*
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include <linux/etherdevice.h>
+
+#include "cpsw_ale.h"
+
+#define BITMASK(bits)		(BIT(bits) - 1)
+
+#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
+#define ALE_VERSION_MINOR(rev)	((rev) & 0xff)
+#define ALE_VERSION_1R3		0x0103
+#define ALE_VERSION_1R4		0x0104
+
+/* ALE Registers */
+#define ALE_IDVER		0x00
+#define ALE_STATUS		0x04
+#define ALE_CONTROL		0x08
+#define ALE_PRESCALE		0x10
+#define ALE_UNKNOWNVLAN		0x18
+#define ALE_TABLE_CONTROL	0x20
+#define ALE_TABLE		0x34
+#define ALE_PORTCTL		0x40
+
+/* ALE NetCP NU switch specific Registers */
+#define ALE_UNKNOWNVLAN_MEMBER			0x90
+#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD	0x94
+#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD		0x98
+#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS	0x9C
+#define ALE_VLAN_MASK_MUX(reg)			(0xc0 + (0x4 * (reg)))
+
+#define ALE_TABLE_WRITE		BIT(31)
+
+#define ALE_TYPE_FREE			0
+#define ALE_TYPE_ADDR			1
+#define ALE_TYPE_VLAN			2
+#define ALE_TYPE_VLAN_ADDR		3
+
+#define ALE_UCAST_PERSISTANT		0
+#define ALE_UCAST_UNTOUCHED		1
+#define ALE_UCAST_OUI			2
+#define ALE_UCAST_TOUCHED		3
+
+#define ALE_TABLE_SIZE_MULTIPLIER	1024
+#define ALE_STATUS_SIZE_MASK		0x1f
+#define ALE_TABLE_SIZE_DEFAULT		64
+
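+/* An ALE table entry spans three 32-bit table words; the hardware
+ * places the most significant word at the lowest register offset,
+ * hence the index flip in the field accessors below.
+ */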
+static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+{
+	int idx;
+
+	idx    = start / 32;
+	start -= idx * 32;
+	idx    = 2 - idx; /* flip */
+	return (ale_entry[idx] >> start) & BITMASK(bits);
+}
+
+static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+				      u32 value)
+{
+	int idx;
+
+	value &= BITMASK(bits);
+	idx    = start / 32;
+	start -= idx * 32;
+	idx    = 2 - idx; /* flip */
+	ale_entry[idx] &= ~(BITMASK(bits) << start);
+	ale_entry[idx] |=  (value << start);
+}
+
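+/* DEFINE_ALE_FIELD generates accessors for fields with a fixed width;
+ * DEFINE_ALE_FIELD1 is for fields whose width varies with the ALE
+ * variant and is passed in by the caller.
+ */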
+#define DEFINE_ALE_FIELD(name, start, bits)				\
+static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
+{									\
+	return cpsw_ale_get_field(ale_entry, start, bits);		\
+}									\
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
+{									\
+	cpsw_ale_set_field(ale_entry, start, bits, value);		\
+}
+
+#define DEFINE_ALE_FIELD1(name, start)					\
+static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits)		\
+{									\
+	return cpsw_ale_get_field(ale_entry, start, bits);		\
+}									\
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value,	\
+		u32 bits)						\
+{									\
+	cpsw_ale_set_field(ale_entry, start, bits, value);		\
+}
+
+DEFINE_ALE_FIELD(entry_type,		60,	2)
+DEFINE_ALE_FIELD(vlan_id,		48,	12)
+DEFINE_ALE_FIELD(mcast_state,		62,	2)
+DEFINE_ALE_FIELD1(port_mask,		66)
+DEFINE_ALE_FIELD(super,			65,	1)
+DEFINE_ALE_FIELD(ucast_type,		62,     2)
+DEFINE_ALE_FIELD1(port_num,		66)
+DEFINE_ALE_FIELD(blocked,		65,     1)
+DEFINE_ALE_FIELD(secure,		64,     1)
+DEFINE_ALE_FIELD1(vlan_untag_force,	24)
+DEFINE_ALE_FIELD1(vlan_reg_mcast,	16)
+DEFINE_ALE_FIELD1(vlan_unreg_mcast,	8)
+DEFINE_ALE_FIELD1(vlan_member_list,	0)
+DEFINE_ALE_FIELD(mcast,			40,	1)
+/* ALE NetCP nu switch specific */
+DEFINE_ALE_FIELD(vlan_unreg_mcast_idx,	20,	3)
+DEFINE_ALE_FIELD(vlan_reg_mcast_idx,	44,	3)
+
+/* The MAC address field in the ALE entry cannot be macroized as above */
+static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
+}
+
+static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
+}
+
+static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+{
+	int i;
+
+	WARN_ON(idx >= ale->params.ale_entries);
+
+	writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+
+	for (i = 0; i < ALE_ENTRY_WORDS; i++)
+		ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+					     ALE_TABLE + 4 * i);
+
+	return idx;
+}
+
+static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+{
+	int i;
+
+	WARN_ON(idx >= ale->params.ale_entries);
+
+	for (i = 0; i < ALE_ENTRY_WORDS; i++)
+		writel_relaxed(ale_entry[i], ale->params.ale_regs +
+			       ALE_TABLE + 4 * i);
+
+	writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+		       ALE_TABLE_CONTROL);
+
+	return idx;
+}
+
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int type, idx;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		u8 entry_addr[6];
+
+		cpsw_ale_read(ale, idx, ale_entry);
+		type = cpsw_ale_get_entry_type(ale_entry);
+		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+			continue;
+		if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+			continue;
+		cpsw_ale_get_addr(ale_entry, entry_addr);
+		if (ether_addr_equal(entry_addr, addr))
+			return idx;
+	}
+	return -ENOENT;
+}
+
+static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int type, idx;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		type = cpsw_ale_get_entry_type(ale_entry);
+		if (type != ALE_TYPE_VLAN)
+			continue;
+		if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+			return idx;
+	}
+	return -ENOENT;
+}
+
+static int cpsw_ale_match_free(struct cpsw_ale *ale)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int type, idx;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		type = cpsw_ale_get_entry_type(ale_entry);
+		if (type == ALE_TYPE_FREE)
+			return idx;
+	}
+	return -ENOENT;
+}
+
+static int cpsw_ale_find_ageable(struct cpsw_ale *ale)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int type, idx;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		type = cpsw_ale_get_entry_type(ale_entry);
+		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+			continue;
+		if (cpsw_ale_get_mcast(ale_entry))
+			continue;
+		type = cpsw_ale_get_ucast_type(ale_entry);
+		if (type != ALE_UCAST_PERSISTANT &&
+		    type != ALE_UCAST_OUI)
+			return idx;
+	}
+	return -ENOENT;
+}
+
+static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+				 int port_mask)
+{
+	int mask;
+
+	mask = cpsw_ale_get_port_mask(ale_entry,
+				      ale->port_mask_bits);
+	if ((mask & port_mask) == 0)
+		return; /* ports don't intersect, not interested */
+	mask &= ~port_mask;
+
+	/* free if only remaining port is host port */
+	if (mask)
+		cpsw_ale_set_port_mask(ale_entry, mask,
+				       ale->port_mask_bits);
+	else
+		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int ret, idx;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		ret = cpsw_ale_get_entry_type(ale_entry);
+		if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+			continue;
+
+		/* If the vid passed in is -1, remove every multicast
+		 * entry irrespective of its vlan id; if a valid vlan id
+		 * is passed, remove only multicast entries added for
+		 * that vlan id and skip all others.
+		 */
+		if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+			continue;
+
+		if (cpsw_ale_get_mcast(ale_entry)) {
+			u8 addr[6];
+
+			cpsw_ale_get_addr(ale_entry, addr);
+			if (!is_broadcast_ether_addr(addr))
+				cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+		}
+
+		cpsw_ale_write(ale, idx, ale_entry);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
+
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+						int flags, u16 vid)
+{
+	if (flags & ALE_VLAN) {
+		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+		cpsw_ale_set_vlan_id(ale_entry, vid);
+	} else {
+		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+	}
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+		       int flags, u16 vid)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int idx;
+
+	cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
+	cpsw_ale_set_addr(ale_entry, addr);
+	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
+	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
+	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+	cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits);
+
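+	/* reuse an entry that already matches this address and vid,
+	 * else take a free slot, else recycle an ageable unicast entry
+	 */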
+	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+	if (idx < 0)
+		idx = cpsw_ale_match_free(ale);
+	if (idx < 0)
+		idx = cpsw_ale_find_ageable(ale);
+	if (idx < 0)
+		return -ENOMEM;
+
+	cpsw_ale_write(ale, idx, ale_entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
+
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+		       int flags, u16 vid)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int idx;
+
+	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+	if (idx < 0)
+		return -ENOENT;
+
+	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+	cpsw_ale_write(ale, idx, ale_entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
+
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+		       int flags, u16 vid, int mcast_state)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int idx, mask;
+
+	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+	if (idx >= 0)
+		cpsw_ale_read(ale, idx, ale_entry);
+
+	cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
+	cpsw_ale_set_addr(ale_entry, addr);
+	cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+	cpsw_ale_set_mcast_state(ale_entry, mcast_state);
+
+	mask = cpsw_ale_get_port_mask(ale_entry,
+				      ale->port_mask_bits);
+	port_mask |= mask;
+	cpsw_ale_set_port_mask(ale_entry, port_mask,
+			       ale->port_mask_bits);
+
+	if (idx < 0)
+		idx = cpsw_ale_match_free(ale);
+	if (idx < 0)
+		idx = cpsw_ale_find_ageable(ale);
+	if (idx < 0)
+		return -ENOMEM;
+
+	cpsw_ale_write(ale, idx, ale_entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
+
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+		       int flags, u16 vid)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int idx;
+
+	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+	if (idx < 0)
+		return -ENOENT;
+
+	cpsw_ale_read(ale, idx, ale_entry);
+
+	if (port_mask)
+		cpsw_ale_set_port_mask(ale_entry, port_mask,
+				       ale->port_mask_bits);
+	else
+		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+	cpsw_ale_write(ale, idx, ale_entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
+
+/* ALE NetCP NU switch specific vlan functions */
+static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+				    int reg_mcast, int unreg_mcast)
+{
+	int idx;
+
+	/* Set VLAN registered multicast flood mask */
+	idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
+	writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+
+	/* Set VLAN unregistered multicast flood mask */
+	idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+	writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+}
+
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+		      int reg_mcast, int unreg_mcast)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int idx;
+
+	idx = cpsw_ale_match_vlan(ale, vid);
+	if (idx >= 0)
+		cpsw_ale_read(ale, idx, ale_entry);
+
+	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+	cpsw_ale_set_vlan_id(ale_entry, vid);
+
+	cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
+	if (!ale->params.nu_switch_ale) {
+		cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
+					    ale->vlan_field_bits);
+		cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+					      ale->vlan_field_bits);
+	} else {
+		cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
+	}
+	cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
+
+	if (idx < 0)
+		idx = cpsw_ale_match_free(ale);
+	if (idx < 0)
+		idx = cpsw_ale_find_ageable(ale);
+	if (idx < 0)
+		return -ENOMEM;
+
+	cpsw_ale_write(ale, idx, ale_entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int idx;
+
+	idx = cpsw_ale_match_vlan(ale, vid);
+	if (idx < 0)
+		return -ENOENT;
+
+	cpsw_ale_read(ale, idx, ale_entry);
+
+	if (port_mask)
+		cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
+					      ale->vlan_field_bits);
+	else
+		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+	cpsw_ale_write(ale, idx, ale_entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
+
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int type, idx;
+	int unreg_mcast = 0;
+
+	/* Only bother doing the work if the setting is actually changing */
+	if (ale->allmulti == allmulti)
+		return;
+
+	/* Remember the new setting to check against next time */
+	ale->allmulti = allmulti;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		type = cpsw_ale_get_entry_type(ale_entry);
+		if (type != ALE_TYPE_VLAN)
+			continue;
+
+		unreg_mcast =
+			cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+						      ale->vlan_field_bits);
+		if (allmulti)
+			unreg_mcast |= 1;
+		else
+			unreg_mcast &= ~1;
+		cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+					      ale->vlan_field_bits);
+		cpsw_ale_write(ale, idx, ale_entry);
+	}
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
+
+struct ale_control_info {
+	const char	*name;
+	int		offset, port_offset;
+	int		shift, port_shift;
+	int		bits;
+};
+
+static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
+	[ALE_ENABLE]		= {
+		.name		= "enable",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 31,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_CLEAR]		= {
+		.name		= "clear",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 30,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_AGEOUT]		= {
+		.name		= "ageout",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 29,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_P0_UNI_FLOOD]	= {
+		.name		= "port0_unicast_flood",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 8,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_VLAN_NOLEARN]	= {
+		.name		= "vlan_nolearn",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 7,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_NO_PORT_VLAN]	= {
+		.name		= "no_port_vlan",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 6,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_OUI_DENY]		= {
+		.name		= "oui_deny",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 5,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_BYPASS]		= {
+		.name		= "bypass",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 4,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_RATE_LIMIT_TX]	= {
+		.name		= "rate_limit_tx",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 3,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_VLAN_AWARE]	= {
+		.name		= "vlan_aware",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 2,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_AUTH_ENABLE]	= {
+		.name		= "auth_enable",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 1,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_RATE_LIMIT]	= {
+		.name		= "rate_limit",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 0,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_PORT_STATE]	= {
+		.name		= "port_state",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 0,
+		.port_shift	= 0,
+		.bits		= 2,
+	},
+	[ALE_PORT_DROP_UNTAGGED] = {
+		.name		= "drop_untagged",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 2,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_PORT_DROP_UNKNOWN_VLAN] = {
+		.name		= "drop_unknown",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 3,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_PORT_NOLEARN]	= {
+		.name		= "nolearn",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 4,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_PORT_NO_SA_UPDATE]	= {
+		.name		= "no_source_update",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 5,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
+	[ALE_PORT_MCAST_LIMIT]	= {
+		.name		= "mcast_limit",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 16,
+		.port_shift	= 0,
+		.bits		= 8,
+	},
+	[ALE_PORT_BCAST_LIMIT]	= {
+		.name		= "bcast_limit",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 24,
+		.port_shift	= 0,
+		.bits		= 8,
+	},
+	[ALE_PORT_UNKNOWN_VLAN_MEMBER] = {
+		.name		= "unknown_vlan_member",
+		.offset		= ALE_UNKNOWNVLAN,
+		.port_offset	= 0,
+		.shift		= 0,
+		.port_shift	= 0,
+		.bits		= 6,
+	},
+	[ALE_PORT_UNKNOWN_MCAST_FLOOD] = {
+		.name		= "unknown_mcast_flood",
+		.offset		= ALE_UNKNOWNVLAN,
+		.port_offset	= 0,
+		.shift		= 8,
+		.port_shift	= 0,
+		.bits		= 6,
+	},
+	[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = {
+		.name		= "unknown_reg_flood",
+		.offset		= ALE_UNKNOWNVLAN,
+		.port_offset	= 0,
+		.shift		= 16,
+		.port_shift	= 0,
+		.bits		= 6,
+	},
+	[ALE_PORT_UNTAGGED_EGRESS] = {
+		.name		= "untagged_egress",
+		.offset		= ALE_UNKNOWNVLAN,
+		.port_offset	= 0,
+		.shift		= 24,
+		.port_shift	= 0,
+		.bits		= 6,
+	},
+};
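+
+/* Worked example (editorial): for a per-port control such as ALE_PORT_STATE
+ * (offset ALE_PORTCTL, port_offset 4, shift 0, bits 2), the accessors below
+ * compute for port 2:
+ *
+ *	offset = ALE_PORTCTL + 2 * 4;
+ *	mask   = BITMASK(2);	(the value lives in bits [1:0])
+ *
+ * Global controls have port_offset == 0 and port_shift == 0, so the port
+ * argument is forced to 0 for them.
+ */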
+
+int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
+			 int value)
+{
+	const struct ale_control_info *info;
+	int offset, shift;
+	u32 tmp, mask;
+
+	if (control < 0 || control >= ARRAY_SIZE(ale_controls))
+		return -EINVAL;
+
+	info = &ale_controls[control];
+	if (info->port_offset == 0 && info->port_shift == 0)
+		port = 0; /* global, port is a don't care */
+
+	if (port < 0 || port >= ale->params.ale_ports)
+		return -EINVAL;
+
+	mask = BITMASK(info->bits);
+	if (value & ~mask)
+		return -EINVAL;
+
+	offset = info->offset + (port * info->port_offset);
+	shift  = info->shift  + (port * info->port_shift);
+
+	tmp = readl_relaxed(ale->params.ale_regs + offset);
+	tmp = (tmp & ~(mask << shift)) | (value << shift);
+	writel_relaxed(tmp, ale->params.ale_regs + offset);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
+
+int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
+{
+	const struct ale_control_info *info;
+	int offset, shift;
+	u32 tmp;
+
+	if (control < 0 || control >= ARRAY_SIZE(ale_controls))
+		return -EINVAL;
+
+	info = &ale_controls[control];
+	if (info->port_offset == 0 && info->port_shift == 0)
+		port = 0; /* global, port is a don't care */
+
+	if (port < 0 || port >= ale->params.ale_ports)
+		return -EINVAL;
+
+	offset = info->offset + (port * info->port_offset);
+	shift  = info->shift  + (port * info->port_shift);
+
+	tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
+	return tmp & BITMASK(info->bits);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
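+
+/* Usage sketch (editorial): putting a slave port into the forwarding state
+ * and reading it back:
+ *
+ *	int state;
+ *
+ *	cpsw_ale_control_set(ale, port, ALE_PORT_STATE,
+ *			     ALE_PORT_STATE_FORWARD);
+ *	state = cpsw_ale_control_get(ale, port, ALE_PORT_STATE);
+ */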
+
+static void cpsw_ale_timer(struct timer_list *t)
+{
+	struct cpsw_ale *ale = from_timer(ale, t, timer);
+
+	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+	if (ale->ageout) {
+		ale->timer.expires = jiffies + ale->ageout;
+		add_timer(&ale->timer);
+	}
+}
+
+void cpsw_ale_start(struct cpsw_ale *ale)
+{
+	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+	cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+	timer_setup(&ale->timer, cpsw_ale_timer, 0);
+	if (ale->ageout) {
+		ale->timer.expires = jiffies + ale->ageout;
+		add_timer(&ale->timer);
+	}
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+	del_timer_sync(&ale->timer);
+	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+	struct cpsw_ale *ale;
+	u32 rev, ale_entries;
+
+	ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+	if (!ale)
+		return NULL;
+
+	ale->params = *params;
+	ale->ageout = ale->params.ale_ageout * HZ;
+
+	rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
+	if (!ale->params.major_ver_mask)
+		ale->params.major_ver_mask = 0xff;
+	ale->version =
+		(ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
+		 ALE_VERSION_MINOR(rev);
+	dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
+		 ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
+		 ALE_VERSION_MINOR(rev));
+
+	if (!ale->params.ale_entries) {
+		ale_entries =
+			readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+			ALE_STATUS_SIZE_MASK;
+		/* The ALE on newer NetCP switches provides the ALE_STATUS
+		 * register, which reports the table size as a multiple of
+		 * 1024 entries; for those, params.ale_entries is passed in
+		 * as zero, so read the register and compute ale_entries
+		 * from it. The ALE on NetCP Lite is much smaller and
+		 * reports zero in ALE_STATUS, so use a default of
+		 * ALE_TABLE_SIZE_DEFAULT for it. The caller is expected to
+		 * set ale_entries for all other versions of ALE.
+		 */
+		if (!ale_entries)
+			ale_entries = ALE_TABLE_SIZE_DEFAULT;
+		else
+			ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
+		ale->params.ale_entries = ale_entries;
+	}
+	dev_info(ale->params.dev,
+		 "ALE Table size %lu\n", ale->params.ale_entries);
+
+	/* set default bits for existing h/w */
+	ale->port_mask_bits = ale->params.ale_ports;
+	ale->port_num_bits = order_base_2(ale->params.ale_ports);
+	ale->vlan_field_bits = ale->params.ale_ports;
+
+	/* Override the defaults for the ALE on the NetCP NU switch and for
+	 * version 1R3
+	 */
+	if (ale->params.nu_switch_ale) {
+		/* The NU switch has separate registers for the unknown vlan
+		 * configuration. Each of those fields is N bits wide, where
+		 * N is the number of ALE ports, and the shift value is 0.
+		 */
+		ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits =
+					ale->params.ale_ports;
+		ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset =
+					ALE_UNKNOWNVLAN_MEMBER;
+		ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits =
+					ale->params.ale_ports;
+		ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0;
+		ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset =
+					ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD;
+		ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits =
+					ale->params.ale_ports;
+		ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0;
+		ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset =
+					ALE_UNKNOWNVLAN_REG_MCAST_FLOOD;
+		ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits =
+					ale->params.ale_ports;
+		ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
+		ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
+					ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
+	}
+
+	return ale;
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_create);
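+
+/* Lifecycle sketch (editorial; the field values are placeholders, not taken
+ * from any particular board):
+ *
+ *	struct cpsw_ale_params ale_params = {
+ *		.dev		= dev,
+ *		.ale_regs	= regs + offset,	(caller-known offset)
+ *		.ale_ageout	= 10,			(seconds)
+ *		.ale_ports	= 3,
+ *		.ale_entries	= 1024,
+ *	};
+ *	struct cpsw_ale *ale = cpsw_ale_create(&ale_params);
+ *
+ *	cpsw_ale_start(ale);	(typically on ndo_open)
+ *	...
+ *	cpsw_ale_stop(ale);	(typically on ndo_stop)
+ */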
+
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
+{
+	int i;
+
+	for (i = 0; i < ale->params.ale_entries; i++) {
+		cpsw_ale_read(ale, i, data);
+		data += ALE_ENTRY_WORDS;
+	}
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_dump);
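+
+/* Note (editorial): the caller owns the sizing contract for the dump; data
+ * must hold ale_entries * ALE_ENTRY_WORDS words, e.g.:
+ *
+ *	u32 *buf = kcalloc(ale->params.ale_entries * ALE_ENTRY_WORDS,
+ *			   sizeof(u32), GFP_KERNEL);
+ *	if (buf)
+ *		cpsw_ale_dump(ale, buf);
+ */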
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPSW ALE driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
new file mode 100644
index 0000000..d4fe901
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -0,0 +1,126 @@
+/*
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __TI_CPSW_ALE_H__
+#define __TI_CPSW_ALE_H__
+
+struct cpsw_ale_params {
+	struct device		*dev;
+	void __iomem		*ale_regs;
+	unsigned long		ale_ageout;	/* in secs */
+	unsigned long		ale_entries;
+	unsigned long		ale_ports;
+	/* The NU Switch needs specific handling: the number of bits in its
+	 * ALE entries differs from other ALE versions, and it has separate
+	 * registers for the unknown-vlan fields. nu_switch_ale identifies
+	 * this hardware.
+	 */
+	bool			nu_switch_ale;
+	/* The major version mask used by the NU Switch ALE is 3 bits instead
+	 * of 8, so the caller must pass it in.
+	 */
+	u32			major_ver_mask;
+};
+
+struct cpsw_ale {
+	struct cpsw_ale_params	params;
+	struct timer_list	timer;
+	unsigned long		ageout;
+	int			allmulti;
+	u32			version;
+	/* These bits are different on NetCP NU Switch ALE */
+	u32			port_mask_bits;
+	u32			port_num_bits;
+	u32			vlan_field_bits;
+};
+
+enum cpsw_ale_control {
+	/* global */
+	ALE_ENABLE,
+	ALE_CLEAR,
+	ALE_AGEOUT,
+	ALE_P0_UNI_FLOOD,
+	ALE_VLAN_NOLEARN,
+	ALE_NO_PORT_VLAN,
+	ALE_OUI_DENY,
+	ALE_BYPASS,
+	ALE_RATE_LIMIT_TX,
+	ALE_VLAN_AWARE,
+	ALE_AUTH_ENABLE,
+	ALE_RATE_LIMIT,
+	/* port controls */
+	ALE_PORT_STATE,
+	ALE_PORT_DROP_UNTAGGED,
+	ALE_PORT_DROP_UNKNOWN_VLAN,
+	ALE_PORT_NOLEARN,
+	ALE_PORT_NO_SA_UPDATE,
+	ALE_PORT_UNKNOWN_VLAN_MEMBER,
+	ALE_PORT_UNKNOWN_MCAST_FLOOD,
+	ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+	ALE_PORT_UNTAGGED_EGRESS,
+	ALE_PORT_BCAST_LIMIT,
+	ALE_PORT_MCAST_LIMIT,
+	ALE_NUM_CONTROLS,
+};
+
+enum cpsw_ale_port_state {
+	ALE_PORT_STATE_DISABLE	= 0x00,
+	ALE_PORT_STATE_BLOCK	= 0x01,
+	ALE_PORT_STATE_LEARN	= 0x02,
+	ALE_PORT_STATE_FORWARD	= 0x03,
+};
+
+/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
+#define ALE_SECURE			BIT(0)
+#define ALE_BLOCKED			BIT(1)
+#define ALE_SUPER			BIT(2)
+#define ALE_VLAN			BIT(3)
+
+#define ALE_PORT_HOST			BIT(0)
+#define ALE_PORT_1			BIT(1)
+#define ALE_PORT_2			BIT(2)
+
+#define ALE_MCAST_FWD			0
+#define ALE_MCAST_BLOCK_LEARN_FWD	1
+#define ALE_MCAST_FWD_LEARN		2
+#define ALE_MCAST_FWD_2			3
+
+#define ALE_ENTRY_BITS		68
+#define ALE_ENTRY_WORDS	DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
+
+void cpsw_ale_start(struct cpsw_ale *ale);
+void cpsw_ale_stop(struct cpsw_ale *ale);
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+		       int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+		       int flags, u16 vid);
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+		       int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+		       int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+			int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
+
+int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
+int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
+			 int control, int value);
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+
+#endif
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 0000000..b96b93c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,605 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/err.h>
+#include <linux/if.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include "cpts.h"
+
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+	unsigned long tmo;
+};
+
+#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
+#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)
+
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+		      u16 ts_seqid, u8 ts_msgtype);
+
+static int event_expired(struct cpts_event *event)
+{
+	return time_after(jiffies, event->tmo);
+}
+
+static int event_type(struct cpts_event *event)
+{
+	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+}
+
+static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
+{
+	u32 r = cpts_read32(cpts, intstat_raw);
+
+	if (r & TS_PEND_RAW) {
+		*high = cpts_read32(cpts, event_high);
+		*low  = cpts_read32(cpts, event_low);
+		cpts_write32(cpts, EVENT_POP, event_pop);
+		return 0;
+	}
+	return -1;
+}
+
+static int cpts_purge_events(struct cpts *cpts)
+{
+	struct list_head *this, *next;
+	struct cpts_event *event;
+	int removed = 0;
+
+	list_for_each_safe(this, next, &cpts->events) {
+		event = list_entry(this, struct cpts_event, list);
+		if (event_expired(event)) {
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			++removed;
+		}
+	}
+
+	if (removed)
+		pr_debug("cpts: event pool cleaned up %d\n", removed);
+	return removed ? 0 : -1;
+}
+
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+	struct sk_buff *skb, *tmp;
+	u16 seqid;
+	u8 mtype;
+	bool found = false;
+
+	mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+	seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+	/* no need to grab txq.lock as access is always done under cpts->lock */
+	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+		struct skb_shared_hwtstamps ssh;
+		unsigned int class = ptp_classify_raw(skb);
+		struct cpts_skb_cb_data *skb_cb =
+					(struct cpts_skb_cb_data *)skb->cb;
+
+		if (cpts_match(skb, class, seqid, mtype)) {
+			u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+			memset(&ssh, 0, sizeof(ssh));
+			ssh.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(skb, &ssh);
+			found = true;
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+			dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+				mtype, seqid);
+			break;
+		}
+
+		if (time_after(jiffies, skb_cb->tmo)) {
+			/* drop skbs whose timestamp wait has expired */
+			dev_dbg(cpts->dev,
+				"expiring tx timestamp mtype %u seqid %04x\n",
+				mtype, seqid);
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+		}
+	}
+
+	return found;
+}
+
+/*
+ * Returns zero if matching event type was found.
+ */
+static int cpts_fifo_read(struct cpts *cpts, int match)
+{
+	int i, type = -1;
+	u32 hi, lo;
+	struct cpts_event *event;
+
+	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
+		if (cpts_fifo_pop(cpts, &hi, &lo))
+			break;
+
+		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
+			pr_err("cpts: event pool empty\n");
+			return -1;
+		}
+
+		event = list_first_entry(&cpts->pool, struct cpts_event, list);
+		event->tmo = jiffies + 2;
+		event->high = hi;
+		event->low = lo;
+		type = event_type(event);
+		switch (type) {
+		case CPTS_EV_TX:
+			if (cpts_match_tx_ts(cpts, event)) {
+				/* if the new event matches an existing skb,
+				 * then don't queue it
+				 */
+				break;
+			}
+			/* fall through */
+		case CPTS_EV_PUSH:
+		case CPTS_EV_RX:
+			list_del_init(&event->list);
+			list_add_tail(&event->list, &cpts->events);
+			break;
+		case CPTS_EV_ROLL:
+		case CPTS_EV_HALF:
+		case CPTS_EV_HW:
+			break;
+		default:
+			pr_err("cpts: unknown event type\n");
+			break;
+		}
+		if (type == match)
+			break;
+	}
+	return type == match ? 0 : -1;
+}
+
+static u64 cpts_systim_read(const struct cyclecounter *cc)
+{
+	u64 val = 0;
+	struct cpts_event *event;
+	struct list_head *this, *next;
+	struct cpts *cpts = container_of(cc, struct cpts, cc);
+
+	cpts_write32(cpts, TS_PUSH, ts_push);
+	if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
+		pr_err("cpts: unable to obtain a time stamp\n");
+
+	list_for_each_safe(this, next, &cpts->events) {
+		event = list_entry(this, struct cpts_event, list);
+		if (event_type(event) == CPTS_EV_PUSH) {
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			val = event->low;
+			break;
+		}
+	}
+
+	return val;
+}
+
+/* PTP clock operations */
+
+static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 adj;
+	u32 diff, mult;
+	int neg_adj = 0;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+	mult = cpts->cc_mult;
+	adj = mult;
+	adj *= ppb;
+	diff = div_u64(adj, 1000000000ULL);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+
+	timecounter_read(&cpts->tc);
+
+	cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return 0;
+}
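+
+/* Worked example (editorial): assuming a nominal cc_mult of 0x80000000
+ * (plausible for a 250 MHz refclk), a request of ppb = +100 gives
+ *
+ *	diff = div_u64(0x80000000ULL * 100, 1000000000) = 214
+ *
+ * so cc.mult becomes 0x80000000 + 214; one mult step is roughly 0.47 ppb
+ * here. timecounter_read() is called first so the new mult only applies
+ * from the current cycle count onward.
+ */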
+
+static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	timecounter_adjtime(&cpts->tc, delta);
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return 0;
+}
+
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	ns = timecounter_read(&cpts->tc);
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int cpts_ptp_settime(struct ptp_clock_info *ptp,
+			    const struct timespec64 *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+	ns = timespec64_to_ns(ts);
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	timecounter_init(&cpts->tc, &cpts->cc, ns);
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return 0;
+}
+
+static int cpts_ptp_enable(struct ptp_clock_info *ptp,
+			   struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+	unsigned long delay = cpts->ov_check_period;
+	struct timespec64 ts;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+	if (!skb_queue_empty(&cpts->txq))
+		delay = CPTS_SKB_TX_WORK_TIMEOUT;
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	pr_debug("cpts overflow check at %lld.%09ld\n",
+		 (long long)ts.tv_sec, ts.tv_nsec);
+	return (long)delay;
+}
+
+static const struct ptp_clock_info cpts_info = {
+	.owner		= THIS_MODULE,
+	.name		= "CPTS timer",
+	.max_adj	= 1000000,
+	.n_ext_ts	= 0,
+	.n_pins		= 0,
+	.pps		= 0,
+	.adjfreq	= cpts_ptp_adjfreq,
+	.adjtime	= cpts_ptp_adjtime,
+	.gettime64	= cpts_ptp_gettime,
+	.settime64	= cpts_ptp_settime,
+	.enable		= cpts_ptp_enable,
+	.do_aux_work	= cpts_overflow_check,
+};
+
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+		      u16 ts_seqid, u8 ts_msgtype)
+{
+	u16 *seqid;
+	unsigned int offset = 0;
+	u8 *msgtype, *data = skb->data;
+
+	if (ptp_class & PTP_CLASS_VLAN)
+		offset += VLAN_HLEN;
+
+	switch (ptp_class & PTP_CLASS_PMASK) {
+	case PTP_CLASS_IPV4:
+		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+		break;
+	case PTP_CLASS_IPV6:
+		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+		break;
+	case PTP_CLASS_L2:
+		offset += ETH_HLEN;
+		break;
+	default:
+		return 0;
+	}
+
+	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+		return 0;
+
+	if (unlikely(ptp_class & PTP_CLASS_V1))
+		msgtype = data + offset + OFF_PTP_CONTROL;
+	else
+		msgtype = data + offset;
+
+	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+	return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+}
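+
+/* Offset example (editorial): for an untagged PTP event message over
+ * UDP/IPv4 with a 20 byte IP header, the walk above yields
+ *
+ *	offset = ETH_HLEN (14) + IPV4_HLEN (20) + UDP_HLEN (8) = 42
+ *
+ * so the message type and sequence ID are read 42 bytes into the frame; a
+ * VLAN tag adds another VLAN_HLEN (4) bytes up front.
+ */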
+
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+{
+	u64 ns = 0;
+	struct cpts_event *event;
+	struct list_head *this, *next;
+	unsigned int class = ptp_classify_raw(skb);
+	unsigned long flags;
+	u16 seqid;
+	u8 mtype;
+
+	if (class == PTP_CLASS_NONE)
+		return 0;
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	cpts_fifo_read(cpts, -1);
+	list_for_each_safe(this, next, &cpts->events) {
+		event = list_entry(this, struct cpts_event, list);
+		if (event_expired(event)) {
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			continue;
+		}
+		mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+		seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+		if (ev_type == event_type(event) &&
+		    cpts_match(skb, class, seqid, mtype)) {
+			ns = timecounter_cyc2time(&cpts->tc, event->low);
+			list_del_init(&event->list);
+			list_add(&event->list, &cpts->pool);
+			break;
+		}
+	}
+
+	if (ev_type == CPTS_EV_TX && !ns) {
+		struct cpts_skb_cb_data *skb_cb =
+				(struct cpts_skb_cb_data *)skb->cb;
+		/* Not found, add frame to queue for processing later.
+		 * The periodic FIFO check will handle this.
+		 */
+		skb_get(skb);
+		/* record the deadline for timing this skb out */
+		skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+		__skb_queue_tail(&cpts->txq, skb);
+		ptp_schedule_worker(cpts->clock, 0);
+	}
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	return ns;
+}
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+	u64 ns;
+	struct skb_shared_hwtstamps *ssh;
+
+	if (!cpts->rx_enable)
+		return;
+	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+	if (!ns)
+		return;
+	ssh = skb_hwtstamps(skb);
+	memset(ssh, 0, sizeof(*ssh));
+	ssh->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
+
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+	u64 ns;
+	struct skb_shared_hwtstamps ssh;
+
+	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+		return;
+	ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
+	if (!ns)
+		return;
+	memset(&ssh, 0, sizeof(ssh));
+	ssh.hwtstamp = ns_to_ktime(ns);
+	skb_tstamp_tx(skb, &ssh);
+}
+EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
+
+int cpts_register(struct cpts *cpts)
+{
+	int err, i;
+
+	skb_queue_head_init(&cpts->txq);
+	INIT_LIST_HEAD(&cpts->events);
+	INIT_LIST_HEAD(&cpts->pool);
+	for (i = 0; i < CPTS_MAX_EVENTS; i++)
+		list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+	clk_enable(cpts->refclk);
+
+	cpts_write32(cpts, CPTS_EN, control);
+	cpts_write32(cpts, TS_PEND_EN, int_enable);
+
+	timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+
+	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
+	if (IS_ERR(cpts->clock)) {
+		err = PTR_ERR(cpts->clock);
+		cpts->clock = NULL;
+		goto err_ptp;
+	}
+	cpts->phc_index = ptp_clock_index(cpts->clock);
+
+	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
+	return 0;
+
+err_ptp:
+	clk_disable(cpts->refclk);
+	return err;
+}
+EXPORT_SYMBOL_GPL(cpts_register);
+
+void cpts_unregister(struct cpts *cpts)
+{
+	if (WARN_ON(!cpts->clock))
+		return;
+
+	ptp_clock_unregister(cpts->clock);
+	cpts->clock = NULL;
+
+	cpts_write32(cpts, 0, int_enable);
+	cpts_write32(cpts, 0, control);
+
+	/* Drop all packets */
+	skb_queue_purge(&cpts->txq);
+
+	clk_disable(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_unregister);
+
+static void cpts_calc_mult_shift(struct cpts *cpts)
+{
+	u64 frac, maxsec, ns;
+	u32 freq;
+
+	freq = clk_get_rate(cpts->refclk);
+
+	/* Calc the maximum number of seconds which we can run before
+	 * wrapping around.
+	 */
+	maxsec = cpts->cc.mask;
+	do_div(maxsec, freq);
+	/* limit the conversion range to 10 sec, as higher values produce
+	 * too-small mult factors and so reduce the conversion accuracy
+	 */
+	if (maxsec > 10)
+		maxsec = 10;
+
+	/* Calc overflow check period (maxsec / 2) */
+	cpts->ov_check_period = (HZ * maxsec) / 2;
+	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
+		 cpts->ov_check_period);
+
+	if (cpts->cc.mult || cpts->cc.shift)
+		return;
+
+	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
+			       freq, NSEC_PER_SEC, maxsec);
+
+	frac = 0;
+	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);
+
+	dev_info(cpts->dev,
+		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
+		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
+}
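+
+/* Worked example (editorial, assuming a 250 MHz refclk): one cycle is 4 ns,
+ * so any pair with mult == 4 << shift converts exactly, e.g. shift = 29,
+ * mult = 0x80000000:
+ *
+ *	ns = (cycles * mult) >> shift = cycles * 4
+ *
+ * clocks_calc_mult_shift() chooses such a pair for the selected maxsec
+ * range; the dev_info above reports the residual error per second.
+ */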
+
+static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
+{
+	int ret = -EINVAL;
+	u32 prop;
+
+	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
+		cpts->cc.mult = prop;
+
+	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
+		cpts->cc.shift = prop;
+
+	if ((cpts->cc.mult && !cpts->cc.shift) ||
+	    (!cpts->cc.mult && cpts->cc.shift))
+		goto of_error;
+
+	return 0;
+
+of_error:
+	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
+	return ret;
+}
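+
+/* DT sketch (editorial; the enclosing node is hypothetical, the property
+ * names are the ones parsed above). Both properties must be given together
+ * or omitted together, matching the check above:
+ *
+ *	ethernet@4a100000 {
+ *		...
+ *		cpts_clock_mult = <0x80000000>;
+ *		cpts_clock_shift = <29>;
+ *	};
+ *
+ * When both are absent, cpts_calc_mult_shift() derives the pair from the
+ * refclk rate instead.
+ */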
+
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+			 struct device_node *node)
+{
+	struct cpts *cpts;
+	int ret;
+
+	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+	if (!cpts)
+		return ERR_PTR(-ENOMEM);
+
+	cpts->dev = dev;
+	cpts->reg = (struct cpsw_cpts __iomem *)regs;
+	spin_lock_init(&cpts->lock);
+
+	ret = cpts_of_parse(cpts, node);
+	if (ret)
+		return ERR_PTR(ret);
+
+	cpts->refclk = devm_clk_get(dev, "cpts");
+	if (IS_ERR(cpts->refclk)) {
+		dev_err(dev, "Failed to get cpts refclk\n");
+		return ERR_CAST(cpts->refclk);
+	}
+
+	clk_prepare(cpts->refclk);
+
+	cpts->cc.read = cpts_systim_read;
+	cpts->cc.mask = CLOCKSOURCE_MASK(32);
+	cpts->info = cpts_info;
+
+	cpts_calc_mult_shift(cpts);
+	/* save cc.mult original value as it can be modified
+	 * by cpts_ptp_adjfreq().
+	 */
+	cpts->cc_mult = cpts->cc.mult;
+
+	return cpts;
+}
+EXPORT_SYMBOL_GPL(cpts_create);
+
+void cpts_release(struct cpts *cpts)
+{
+	if (!cpts)
+		return;
+
+	if (WARN_ON(!cpts->refclk))
+		return;
+
+	clk_unprepare(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPTS driver");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 0000000..73d73fa
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,225 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef _TI_CPTS_H_
+#define _TI_CPTS_H_
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clocksource.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/skbuff.h>
+#include <linux/ptp_classify.h>
+#include <linux/timecounter.h>
+
+struct cpsw_cpts {
+	u32 idver;                /* Identification and version */
+	u32 control;              /* Time sync control */
+	u32 res1;
+	u32 ts_push;              /* Time stamp event push */
+	u32 ts_load_val;          /* Time stamp load value */
+	u32 ts_load_en;           /* Time stamp load enable */
+	u32 res2[2];
+	u32 intstat_raw;          /* Time sync interrupt status raw */
+	u32 intstat_masked;       /* Time sync interrupt status masked */
+	u32 int_enable;           /* Time sync interrupt enable */
+	u32 res3;
+	u32 event_pop;            /* Event interrupt pop */
+	u32 event_low;            /* 32 Bit Event Time Stamp */
+	u32 event_high;           /* Event Type Fields */
+};
+
+/* Bit definitions for the IDVER register */
+#define TX_IDENT_SHIFT       (16)    /* TX Identification Value */
+#define TX_IDENT_MASK        (0xffff)
+#define RTL_VER_SHIFT        (11)    /* RTL Version Value */
+#define RTL_VER_MASK         (0x1f)
+#define MAJOR_VER_SHIFT      (8)     /* Major Version Value */
+#define MAJOR_VER_MASK       (0x7)
+#define MINOR_VER_SHIFT      (0)     /* Minor Version Value */
+#define MINOR_VER_MASK       (0xff)
+
+/* Bit definitions for the CONTROL register */
+#define HW4_TS_PUSH_EN       (1<<11) /* Hardware push 4 enable */
+#define HW3_TS_PUSH_EN       (1<<10) /* Hardware push 3 enable */
+#define HW2_TS_PUSH_EN       (1<<9)  /* Hardware push 2 enable */
+#define HW1_TS_PUSH_EN       (1<<8)  /* Hardware push 1 enable */
+#define INT_TEST             (1<<1)  /* Interrupt Test */
+#define CPTS_EN              (1<<0)  /* Time Sync Enable */
+
+/*
+ * Definitions for the single bit registers:
+ * TS_PUSH TS_LOAD_EN  INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
+ */
+#define TS_PUSH             (1<<0)  /* Time stamp event push */
+#define TS_LOAD_EN          (1<<0)  /* Time Stamp Load */
+#define TS_PEND_RAW         (1<<0)  /* int read (before enable) */
+#define TS_PEND             (1<<0)  /* masked interrupt read (after enable) */
+#define TS_PEND_EN          (1<<0)  /* masked interrupt enable */
+#define EVENT_POP           (1<<0)  /* writing discards one event */
+
+/* Bit definitions for the EVENT_HIGH register */
+#define PORT_NUMBER_SHIFT    (24)    /* Indicates Ethernet port or HW pin */
+#define PORT_NUMBER_MASK     (0x1f)
+#define EVENT_TYPE_SHIFT     (20)    /* Time sync event type */
+#define EVENT_TYPE_MASK      (0xf)
+#define MESSAGE_TYPE_SHIFT   (16)    /* PTP message type */
+#define MESSAGE_TYPE_MASK    (0xf)
+#define SEQUENCE_ID_SHIFT    (0)     /* PTP message sequence ID */
+#define SEQUENCE_ID_MASK     (0xffff)
+
+enum {
+	CPTS_EV_PUSH, /* Time Stamp Push Event */
+	CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+	CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+	CPTS_EV_HW,   /* Hardware Time Stamp Push Event */
+	CPTS_EV_RX,   /* Ethernet Receive Event */
+	CPTS_EV_TX,   /* Ethernet Transmit Event */
+};
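+
+/* Decode example (editorial): an event_high word of 0x01501234 splits,
+ * using the shift/mask pairs above, into port number 1, event type 5
+ * (CPTS_EV_TX), PTP message type 0 (Sync) and sequence ID 0x1234.
+ */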
+
+#define CPTS_FIFO_DEPTH 16
+#define CPTS_MAX_EVENTS 32
+
+struct cpts_event {
+	struct list_head list;
+	unsigned long tmo;
+	u32 high;
+	u32 low;
+};
+
+struct cpts {
+	struct device *dev;
+	struct cpsw_cpts __iomem *reg;
+	int tx_enable;
+	int rx_enable;
+	struct ptp_clock_info info;
+	struct ptp_clock *clock;
+	spinlock_t lock; /* protects time registers */
+	u32 cc_mult; /* for the nominal frequency */
+	struct cyclecounter cc;
+	struct timecounter tc;
+	int phc_index;
+	struct clk *refclk;
+	struct list_head events;
+	struct list_head pool;
+	struct cpts_event pool_data[CPTS_MAX_EVENTS];
+	unsigned long ov_check_period;
+	struct sk_buff_head txq;
+};
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+int cpts_register(struct cpts *cpts);
+void cpts_unregister(struct cpts *cpts);
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+			 struct device_node *node);
+void cpts_release(struct cpts *cpts);
+
+static inline void cpts_rx_enable(struct cpts *cpts, int enable)
+{
+	cpts->rx_enable = enable;
+}
+
+static inline bool cpts_is_rx_enabled(struct cpts *cpts)
+{
+	return !!cpts->rx_enable;
+}
+
+static inline void cpts_tx_enable(struct cpts *cpts, int enable)
+{
+	cpts->tx_enable = enable;
+}
+
+static inline bool cpts_is_tx_enabled(struct cpts *cpts)
+{
+	return !!cpts->tx_enable;
+}
+
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+	unsigned int class = ptp_classify_raw(skb);
+
+	if (class == PTP_CLASS_NONE)
+		return false;
+
+	return true;
+}
+
+#else
+struct cpts;
+
+static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+
+static inline
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+			 struct device_node *node)
+{
+	return NULL;
+}
+
+static inline void cpts_release(struct cpts *cpts)
+{
+}
+
+static inline int
+cpts_register(struct cpts *cpts)
+{
+	return 0;
+}
+
+static inline void cpts_unregister(struct cpts *cpts)
+{
+}
+
+static inline void cpts_rx_enable(struct cpts *cpts, int enable)
+{
+}
+
+static inline bool cpts_is_rx_enabled(struct cpts *cpts)
+{
+	return false;
+}
+
+static inline void cpts_tx_enable(struct cpts *cpts, int enable)
+{
+}
+
+static inline bool cpts_is_tx_enabled(struct cpts *cpts)
+{
+	return false;
+}
+
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+	return false;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
new file mode 100644
index 0000000..4236dcd
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -0,0 +1,1353 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER		0x00
+#define CPDMA_TXCONTROL		0x04
+#define CPDMA_TXTEARDOWN	0x08
+#define CPDMA_RXIDVER		0x10
+#define CPDMA_RXCONTROL		0x14
+#define CPDMA_SOFTRESET		0x1c
+#define CPDMA_RXTEARDOWN	0x18
+#define CPDMA_TX_PRI0_RATE	0x30
+#define CPDMA_TXINTSTATRAW	0x80
+#define CPDMA_TXINTSTATMASKED	0x84
+#define CPDMA_TXINTMASKSET	0x88
+#define CPDMA_TXINTMASKCLEAR	0x8c
+#define CPDMA_MACINVECTOR	0x90
+#define CPDMA_MACEOIVECTOR	0x94
+#define CPDMA_RXINTSTATRAW	0xa0
+#define CPDMA_RXINTSTATMASKED	0xa4
+#define CPDMA_RXINTMASKSET	0xa8
+#define CPDMA_RXINTMASKCLEAR	0xac
+#define CPDMA_DMAINTSTATRAW	0xb0
+#define CPDMA_DMAINTSTATMASKED	0xb4
+#define CPDMA_DMAINTMASKSET	0xb8
+#define CPDMA_DMAINTMASKCLEAR	0xbc
+#define CPDMA_DMAINT_HOSTERR	BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL	0x20
+#define CPDMA_DMASTATUS		0x24
+#define CPDMA_RXBUFFOFS		0x28
+#define CPDMA_EM_CONTROL	0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP		BIT(31)
+#define CPDMA_DESC_EOP		BIT(30)
+#define CPDMA_DESC_OWNER	BIT(29)
+#define CPDMA_DESC_EOQ		BIT(28)
+#define CPDMA_DESC_TD_COMPLETE	BIT(27)
+#define CPDMA_DESC_PASS_CRC	BIT(26)
+#define CPDMA_DESC_TO_PORT_EN	BIT(20)
+#define CPDMA_TO_PORT_SHIFT	16
+#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
+#define CPDMA_DESC_CRC_LEN	4
+
+#define CPDMA_TEARDOWN_VALUE	0xfffffffc
+
+#define CPDMA_MAX_RLIM_CNT	16384
+
+struct cpdma_desc {
+	/* hardware fields */
+	u32			hw_next;
+	u32			hw_buffer;
+	u32			hw_len;
+	u32			hw_mode;
+	/* software fields */
+	void			*sw_token;
+	u32			sw_buffer;
+	u32			sw_len;
+};
+
+struct cpdma_desc_pool {
+	phys_addr_t		phys;
+	dma_addr_t		hw_addr;
+	void __iomem		*iomap;		/* ioremap map */
+	void			*cpumap;	/* dma_alloc map */
+	int			desc_size, mem_size;
+	int			num_desc;
+	struct device		*dev;
+	struct gen_pool		*gen_pool;
+};
+
+enum cpdma_state {
+	CPDMA_STATE_IDLE,
+	CPDMA_STATE_ACTIVE,
+	CPDMA_STATE_TEARDOWN,
+};
+
+struct cpdma_ctlr {
+	enum cpdma_state	state;
+	struct cpdma_params	params;
+	struct device		*dev;
+	struct cpdma_desc_pool	*pool;
+	spinlock_t		lock;
+	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
+	int chan_num;
+	int			num_rx_desc; /* RX descriptors number */
+	int			num_tx_desc; /* TX descriptors number */
+};
+
+struct cpdma_chan {
+	struct cpdma_desc __iomem	*head, *tail;
+	void __iomem			*hdp, *cp, *rxfree;
+	enum cpdma_state		state;
+	struct cpdma_ctlr		*ctlr;
+	int				chan_num;
+	spinlock_t			lock;
+	int				count;
+	u32				desc_num;
+	u32				mask;
+	cpdma_handler_fn		handler;
+	enum dma_data_direction		dir;
+	struct cpdma_chan_stats		stats;
+	/* offsets into dmaregs */
+	int	int_set, int_clear, td;
+	int				weight;
+	u32				rate_factor;
+	u32				rate;
+};
+
+struct cpdma_control_info {
+	u32		reg;
+	u32		shift, mask;
+	int		access;
+#define ACCESS_RO	BIT(0)
+#define ACCESS_WO	BIT(1)
+#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
+};
+
+static struct cpdma_control_info controls[] = {
+	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
+	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
+	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
+	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
+	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
+	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
+	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
+	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
+	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
+	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
+	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
+	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
+};
+
+#define tx_chan_num(chan)	(chan)
+#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan)	(!is_rx_chan(chan))
+#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan)	__chan_linear((chan)->chan_num)
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs		params.dmaregs
+#define num_chan	params.num_chan
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld)		readl((chan)->fld)
+#define desc_read(desc, fld)		readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
+#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)
+
+#define cpdma_desc_to_port(chan, mode, directed)			\
+	do {								\
+		if (!is_rx_chan(chan) && ((directed == 1) ||		\
+					  (directed == 2)))		\
+			mode |= (CPDMA_DESC_TO_PORT_EN |		\
+				 (directed << CPDMA_TO_PORT_SHIFT));	\
+	} while (0)
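+
+/* Note (editorial): "directed" packets are used in dual-EMAC style setups;
+ * a directed value of 1 or 2 on a tx descriptor forces the frame out of
+ * that slave port instead of leaving the choice to the switch, while 0
+ * leaves the mode word untouched.
+ */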
+
+static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
+{
+	struct cpdma_desc_pool *pool = ctlr->pool;
+
+	if (!pool)
+		return;
+
+	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+	     "cpdma_desc_pool size %zd != avail %zd",
+	     gen_pool_size(pool->gen_pool),
+	     gen_pool_avail(pool->gen_pool));
+	if (pool->cpumap)
+		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
+				  pool->phys);
+}
+
+/*
+ * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors.  Some other
+ * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
+ * abstract out these details.
+ */
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+{
+	struct cpdma_params *cpdma_params = &ctlr->params;
+	struct cpdma_desc_pool *pool;
+	int ret = -ENOMEM;
+
+	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		goto gen_pool_create_fail;
+	ctlr->pool = pool;
+
+	pool->mem_size	= cpdma_params->desc_mem_size;
+	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
+				cpdma_params->desc_align);
+	pool->num_desc	= pool->mem_size / pool->desc_size;
+
+	if (cpdma_params->descs_pool_size) {
+		/* recalculate the memory size required for the cpdma
+		 * descriptor pool, based on the number of descriptors
+		 * specified by the user; if that size exceeds the CPPI
+		 * internal RAM size (desc_mem_size), switch to DDR
+		 */
+		pool->num_desc = cpdma_params->descs_pool_size;
+		pool->mem_size = pool->desc_size * pool->num_desc;
+		if (pool->mem_size > cpdma_params->desc_mem_size)
+			cpdma_params->desc_mem_phys = 0;
+	}
+
+	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
+					      -1, "cpdma");
+	if (IS_ERR(pool->gen_pool)) {
+		ret = PTR_ERR(pool->gen_pool);
+		dev_err(ctlr->dev, "pool create failed %d\n", ret);
+		goto gen_pool_create_fail;
+	}
+
+	if (cpdma_params->desc_mem_phys) {
+		pool->phys  = cpdma_params->desc_mem_phys;
+		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
+					   pool->mem_size);
+		pool->hw_addr = cpdma_params->desc_hw_addr;
+	} else {
+		pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
+						  &pool->hw_addr, GFP_KERNEL);
+		pool->iomap = (void __iomem __force *)pool->cpumap;
+		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
+	}
+
+	if (!pool->iomap)
+		goto gen_pool_create_fail;
+
+	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+				pool->phys, pool->mem_size, -1);
+	if (ret < 0) {
+		dev_err(ctlr->dev, "pool add failed %d\n", ret);
+		goto gen_pool_add_virt_fail;
+	}
+
+	return 0;
+
+gen_pool_add_virt_fail:
+	cpdma_desc_pool_destroy(ctlr);
+gen_pool_create_fail:
+	ctlr->pool = NULL;
+	return ret;
+}
+
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+		  struct cpdma_desc __iomem *desc)
+{
+	if (!desc)
+		return 0;
+	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
+}
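+
+/* Mapping example (editorial, made-up addresses): desc_phys() and
+ * desc_from_phys() are inverse translations between the CPU view (iomap)
+ * and the DMA view (hw_addr) of the same pool. With iomap = 0xe0830000 and
+ * hw_addr = 0x4a102000, the descriptor at CPU address 0xe0830100 has DMA
+ * address 0x4a102100, which is what gets written into hdp/hw_next.
+ */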
+
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
+{
+	return (struct cpdma_desc __iomem *)
+		gen_pool_alloc(pool->gen_pool, pool->desc_size);
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+			    struct cpdma_desc __iomem *desc, int num_desc)
+{
+	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
+}
+
+static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+	struct cpdma_control_info *info = &controls[control];
+	u32 val;
+
+	if (!ctlr->params.has_ext_regs)
+		return -ENOTSUPP;
+
+	if (ctlr->state != CPDMA_STATE_ACTIVE)
+		return -EINVAL;
+
+	if (control < 0 || control >= ARRAY_SIZE(controls))
+		return -ENOENT;
+
+	if ((info->access & ACCESS_WO) != ACCESS_WO)
+		return -EPERM;
+
+	val  = dma_reg_read(ctlr, info->reg);
+	val &= ~(info->mask << info->shift);
+	val |= (value & info->mask) << info->shift;
+	dma_reg_write(ctlr, info->reg, val);
+
+	return 0;
+}
+
+static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+	struct cpdma_control_info *info = &controls[control];
+	int ret;
+
+	if (!ctlr->params.has_ext_regs)
+		return -ENOTSUPP;
+
+	if (ctlr->state != CPDMA_STATE_ACTIVE)
+		return -EINVAL;
+
+	if (control < 0 || control >= ARRAY_SIZE(controls))
+		return -ENOENT;
+
+	if ((info->access & ACCESS_RO) != ACCESS_RO)
+		return -EPERM;
+
+	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+	return ret;
+}
+
+/* cpdma_chan_set_chan_shaper - set shaper for a channel
+ * Has to be called under ctlr lock
+ */
+static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr *ctlr = chan->ctlr;
+	u32 rate_reg;
+	u32 rmask;
+	int ret;
+
+	if (!chan->rate)
+		return 0;
+
+	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
+	dma_reg_write(ctlr, rate_reg, chan->rate_factor);
+
+	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
+	rmask |= chan->mask;
+
+	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
+	return ret;
+}
+
+static int cpdma_chan_on(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr *ctlr = chan->ctlr;
+	struct cpdma_desc_pool	*pool = ctlr->pool;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state != CPDMA_STATE_IDLE) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EBUSY;
+	}
+	if (ctlr->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+	dma_reg_write(ctlr, chan->int_set, chan->mask);
+	chan->state = CPDMA_STATE_ACTIVE;
+	if (chan->head) {
+		chan_write(chan, hdp, desc_phys(pool, chan->head));
+		if (chan->rxfree)
+			chan_write(chan, rxfree, chan->count);
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return 0;
+}
+
+/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
+ * rmask - mask of rate limited channels
+ * Returns 0 on success, -EINVAL if a channel above a rate-limited
+ * one is not itself rate limited.
+ */
+static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
+			       u32 *rmask, int *prio_mode)
+{
+	struct cpdma_ctlr *ctlr = ch->ctlr;
+	struct cpdma_chan *chan;
+	u32 old_rate = ch->rate;
+	u32 new_rmask = 0;
+	int rlim = 0;
+	int i;
+
+	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
+		chan = ctlr->channels[i];
+		if (!chan)
+			continue;
+
+		if (chan == ch)
+			chan->rate = rate;
+
+		if (chan->rate) {
+			rlim = 1;
+			new_rmask |= chan->mask;
+			continue;
+		}
+
+		if (rlim)
+			goto err;
+	}
+
+	*rmask = new_rmask;
+	*prio_mode = rlim;
+	return 0;
+
+err:
+	ch->rate = old_rate;
+	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
+		chan->chan_num);
+	return -EINVAL;
+}
+
+static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
+				  struct cpdma_chan *ch)
+{
+	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
+	u32 best_send_cnt = 0, best_idle_cnt = 0;
+	u32 new_rate, best_rate = 0, rate_reg;
+	u64 send_cnt, idle_cnt;
+	u32 min_send_cnt, freq;
+	u64 dividend, divisor;
+
+	if (!ch->rate) {
+		ch->rate_factor = 0;
+		goto set_factor;
+	}
+
+	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
+	if (!freq) {
+		dev_err(ctlr->dev, "The bus frequency is not set\n");
+		return -EINVAL;
+	}
+
+	min_send_cnt = freq - ch->rate;
+	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
+	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
+		dividend = ch->rate * send_cnt;
+		divisor = min_send_cnt;
+		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);
+
+		dividend = freq * idle_cnt;
+		divisor = idle_cnt + send_cnt;
+		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);
+
+		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
+		if (delta < best_delta) {
+			best_delta = delta;
+			best_send_cnt = send_cnt;
+			best_idle_cnt = idle_cnt;
+			best_rate = new_rate;
+
+			if (!delta)
+				break;
+		}
+
+		if (prev_delta >= delta) {
+			prev_delta = delta;
+			send_cnt++;
+			continue;
+		}
+
+		idle_cnt++;
+		dividend = freq * idle_cnt;
+		send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
+		send_cnt -= idle_cnt;
+		prev_delta = UINT_MAX;
+	}
+
+	ch->rate = best_rate;
+	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);
+
+set_factor:
+	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
+	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
+	return 0;
+}
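+
+/* Worked example (editorial, assuming bus_freq_mhz = 250): freq = 250 *
+ * 1000 * 32 = 8,000,000 Kb/s. For a requested rate of 2,000,000 Kb/s:
+ *
+ *	min_send_cnt = 8,000,000 - 2,000,000 = 6,000,000
+ *	send_cnt = DIV_ROUND_UP(6,000,000, 2,000,000) = 3
+ *	idle_cnt = round(2,000,000 * 3 / 6,000,000) = 1
+ *	new_rate = round(8,000,000 * 1 / (1 + 3)) = 2,000,000
+ *
+ * delta is 0 on the first pass, so rate_factor = 3 | (1 << 16) = 0x10003.
+ */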
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+	struct cpdma_ctlr *ctlr;
+
+	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
+	if (!ctlr)
+		return NULL;
+
+	ctlr->state = CPDMA_STATE_IDLE;
+	ctlr->params = *params;
+	ctlr->dev = params->dev;
+	ctlr->chan_num = 0;
+	spin_lock_init(&ctlr->lock);
+
+	if (cpdma_desc_pool_create(ctlr))
+		return NULL;
+	/* split pool equally between RX/TX by default */
+	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
+	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
+
+	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+		ctlr->num_chan = CPDMA_MAX_CHANNELS;
+	return ctlr;
+}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+	struct cpdma_chan *chan;
+	unsigned long flags;
+	int i, prio_mode;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	if (ctlr->state != CPDMA_STATE_IDLE) {
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		return -EBUSY;
+	}
+
+	if (ctlr->params.has_soft_reset) {
+		unsigned timeout = 10 * 100;
+
+		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+		while (timeout) {
+			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+				break;
+			udelay(10);
+			timeout--;
+		}
+		WARN_ON(!timeout);
+	}
+
+	for (i = 0; i < ctlr->num_chan; i++) {
+		writel(0, ctlr->params.txhdp + 4 * i);
+		writel(0, ctlr->params.rxhdp + 4 * i);
+		writel(0, ctlr->params.txcp + 4 * i);
+		writel(0, ctlr->params.rxcp + 4 * i);
+	}
+
+	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+	ctlr->state = CPDMA_STATE_ACTIVE;
+
+	prio_mode = 0;
+	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+		chan = ctlr->channels[i];
+		if (chan) {
+			cpdma_chan_set_chan_shaper(chan);
+			cpdma_chan_on(chan);
+
+			/* use fixed prio mode unless all tx channels
+			 * are rate limited
+			 */
+			if (is_tx_chan(chan) && !chan->rate)
+				prio_mode = 1;
+		}
+	}
+
+	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
+	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
+
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	if (ctlr->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		return -EINVAL;
+	}
+
+	ctlr->state = CPDMA_STATE_TEARDOWN;
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+		if (ctlr->channels[i])
+			cpdma_chan_stop(ctlr->channels[i]);
+	}
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+	ctlr->state = CPDMA_STATE_IDLE;
+
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+	int ret = 0, i;
+
+	if (!ctlr)
+		return -EINVAL;
+
+	if (ctlr->state != CPDMA_STATE_IDLE)
+		cpdma_ctlr_stop(ctlr);
+
+	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+		cpdma_chan_destroy(ctlr->channels[i]);
+
+	cpdma_desc_pool_destroy(ctlr);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	if (ctlr->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+		if (ctlr->channels[i])
+			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+	}
+
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
+{
+	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
+
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
+{
+	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
+
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
+{
+	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
+
+static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
+				 int rx, int desc_num,
+				 int per_ch_desc)
+{
+	struct cpdma_chan *chan, *most_chan = NULL;
+	int desc_cnt = desc_num;
+	int most_dnum = 0;
+	int min, max, i;
+
+	if (!desc_num)
+		return;
+
+	if (rx) {
+		min = rx_chan_num(0);
+		max = rx_chan_num(CPDMA_MAX_CHANNELS);
+	} else {
+		min = tx_chan_num(0);
+		max = tx_chan_num(CPDMA_MAX_CHANNELS);
+	}
+
+	for (i = min; i < max; i++) {
+		chan = ctlr->channels[i];
+		if (!chan)
+			continue;
+
+		if (chan->weight)
+			chan->desc_num = (chan->weight * desc_num) / 100;
+		else
+			chan->desc_num = per_ch_desc;
+
+		desc_cnt -= chan->desc_num;
+
+		if (most_dnum < chan->desc_num) {
+			most_dnum = chan->desc_num;
+			most_chan = chan;
+		}
+	}
+	/* hand any remainder to the most loaded channel */
+	if (most_chan)
+		most_chan->desc_num += desc_cnt;
+}
+
+/**
+ * cpdma_chan_split_pool - Split the ctlr descriptor pool between all channels.
+ * Has to be called under ctlr lock.
+ */
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+{
+	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
+	int free_rx_num = 0, free_tx_num = 0;
+	int rx_weight = 0, tx_weight = 0;
+	int tx_desc_num, rx_desc_num;
+	struct cpdma_chan *chan;
+	int i;
+
+	if (!ctlr->chan_num)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+		chan = ctlr->channels[i];
+		if (!chan)
+			continue;
+
+		if (is_rx_chan(chan)) {
+			if (!chan->weight)
+				free_rx_num++;
+			rx_weight += chan->weight;
+		} else {
+			if (!chan->weight)
+				free_tx_num++;
+			tx_weight += chan->weight;
+		}
+	}
+
+	if (rx_weight > 100 || tx_weight > 100)
+		return -EINVAL;
+
+	tx_desc_num = ctlr->num_tx_desc;
+	rx_desc_num = ctlr->num_rx_desc;
+
+	if (free_tx_num) {
+		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
+		tx_per_ch_desc /= free_tx_num;
+	}
+	if (free_rx_num) {
+		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
+		rx_per_ch_desc /= free_rx_num;
+	}
+
+	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
+	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
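+
+/* Split example (editorial): with num_tx_desc = 256 and two tx channels,
+ * one with weight 75 and one with weight 0, the weighted channel gets
+ * (75 * 256) / 100 = 192 descriptors and the free one gets tx_per_ch_desc =
+ * (256 - 192) / 1 = 64; any rounding remainder goes to the most loaded
+ * channel in cpdma_chan_set_descs().
+ */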
+
+
+/* cpdma_chan_set_weight - set weight of a channel in percentage.
+ * Tx and Rx channels have separate weight budgets of 100% each.
+ * The weight splits cpdma resources, including the number of
+ * descriptors, between the channels in the required proportion.
+ * The channel rate alone is not enough to derive a weight, as the
+ * maximum rate of the interface would also be needed. If weight is
+ * 0, the channel uses whatever descriptors are left over by the
+ * weighted channels.
+ */
+int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
+{
+	struct cpdma_ctlr *ctlr = ch->ctlr;
+	unsigned long flags, ch_flags;
+	int ret;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	spin_lock_irqsave(&ch->lock, ch_flags);
+	if (ch->weight == weight) {
+		spin_unlock_irqrestore(&ch->lock, ch_flags);
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		return 0;
+	}
+	ch->weight = weight;
+	spin_unlock_irqrestore(&ch->lock, ch_flags);
+
+	/* re-split pool using new channel weight */
+	ret = cpdma_chan_split_pool(ctlr);
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
+
+/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
+ * Should be called before cpdma_chan_set_rate.
+ * Returns min rate in Kb/s
+ */
+u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
+{
+	unsigned int dividend, divisor;
+
+	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
+	divisor = 1 + CPDMA_MAX_RLIM_CNT;
+
+	return DIV_ROUND_UP(dividend, divisor);
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
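+
+/* Worked example (hypothetical bus frequency): with bus_freq_mhz = 125 the
+ * dividend is 125 * 32 * 1000 = 4,000,000, so the minimum rate comes out as
+ * DIV_ROUND_UP(4000000, 1 + CPDMA_MAX_RLIM_CNT) Kb/s; the exact value
+ * depends on CPDMA_MAX_RLIM_CNT, defined elsewhere in this driver.
+ */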
+
+/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
+ * The bandwidth-limited channels have to be configured in order, beginning
+ * from the lowest.
+ * ch - transmit channel the bandwidth is configured for
+ * rate - bandwidth in Kb/s; if 0, the shaper is switched off
+ */
+int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
+{
+	unsigned long flags, ch_flags;
+	struct cpdma_ctlr *ctlr;
+	int ret, prio_mode;
+	u32 rmask;
+
+	if (!ch || !is_tx_chan(ch))
+		return -EINVAL;
+
+	if (ch->rate == rate)
+		return rate;
+
+	ctlr = ch->ctlr;
+	spin_lock_irqsave(&ctlr->lock, flags);
+	spin_lock_irqsave(&ch->lock, ch_flags);
+
+	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
+	if (ret)
+		goto err;
+
+	ret = cpdma_chan_set_factors(ctlr, ch);
+	if (ret)
+		goto err;
+
+	spin_unlock_irqrestore(&ch->lock, ch_flags);
+
+	/* switch on the shapers */
+	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
+	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return ret;
+
+err:
+	spin_unlock_irqrestore(&ch->lock, ch_flags);
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
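+
+/* Usage sketch (illustrative; tx_chan[] is a hypothetical array of transmit
+ * channels): enable shapers beginning from the lowest channel, as required
+ * above, e.g. 10 Mb/s on ch0 and 100 Mb/s on ch1 (rates in Kb/s):
+ *
+ *	ret = cpdma_chan_set_rate(tx_chan[0], 10000);
+ *	if (!ret)
+ *		ret = cpdma_chan_set_rate(tx_chan[1], 100000);
+ */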
+
+u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
+{
+	unsigned long flags;
+	u32 rate;
+
+	spin_lock_irqsave(&ch->lock, flags);
+	rate = ch->rate;
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	return rate;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+				     cpdma_handler_fn handler, int rx_type)
+{
+	int offset = chan_num * 4;
+	struct cpdma_chan *chan;
+	unsigned long flags;
+
+	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
+
+	if (__chan_linear(chan_num) >= ctlr->num_chan)
+		return ERR_PTR(-EINVAL);
+
+	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	if (ctlr->channels[chan_num]) {
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		devm_kfree(ctlr->dev, chan);
+		return ERR_PTR(-EBUSY);
+	}
+
+	chan->ctlr	= ctlr;
+	chan->state	= CPDMA_STATE_IDLE;
+	chan->chan_num	= chan_num;
+	chan->handler	= handler;
+	chan->rate	= 0;
+	chan->weight	= 0;
+
+	if (is_rx_chan(chan)) {
+		chan->hdp	= ctlr->params.rxhdp + offset;
+		chan->cp	= ctlr->params.rxcp + offset;
+		chan->rxfree	= ctlr->params.rxfree + offset;
+		chan->int_set	= CPDMA_RXINTMASKSET;
+		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
+		chan->td	= CPDMA_RXTEARDOWN;
+		chan->dir	= DMA_FROM_DEVICE;
+	} else {
+		chan->hdp	= ctlr->params.txhdp + offset;
+		chan->cp	= ctlr->params.txcp + offset;
+		chan->int_set	= CPDMA_TXINTMASKSET;
+		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
+		chan->td	= CPDMA_TXTEARDOWN;
+		chan->dir	= DMA_TO_DEVICE;
+	}
+	chan->mask = BIT(chan_linear(chan));
+
+	spin_lock_init(&chan->lock);
+
+	ctlr->channels[chan_num] = chan;
+	ctlr->chan_num++;
+
+	cpdma_chan_split_pool(ctlr);
+
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return chan;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_create);
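+
+/* Usage sketch (illustrative; names are hypothetical): a completion handler
+ * matching cpdma_handler_fn, and a TX channel (rx_type = 0) created for it:
+ *
+ *	static void my_tx_handler(void *token, int len, int status)
+ *	{
+ *		dev_kfree_skb_any(token);
+ *	}
+ *
+ *	txch = cpdma_chan_create(ctlr, 0, my_tx_handler, 0);
+ *	if (IS_ERR(txch))
+ *		return PTR_ERR(txch);
+ */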
+
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
+{
+	unsigned long flags;
+	int desc_num;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	desc_num = chan->desc_num;
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return desc_num;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr *ctlr;
+	unsigned long flags;
+
+	if (!chan)
+		return -EINVAL;
+	ctlr = chan->ctlr;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	if (chan->state != CPDMA_STATE_IDLE)
+		cpdma_chan_stop(chan);
+	ctlr->channels[chan->chan_num] = NULL;
+	ctlr->chan_num--;
+	devm_kfree(ctlr->dev, chan);
+	cpdma_chan_split_pool(ctlr);
+
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+			 struct cpdma_chan_stats *stats)
+{
+	unsigned long flags;
+	if (!chan)
+		return -EINVAL;
+	spin_lock_irqsave(&chan->lock, flags);
+	memcpy(stats, &chan->stats, sizeof(*stats));
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+				struct cpdma_desc __iomem *desc)
+{
+	struct cpdma_ctlr		*ctlr = chan->ctlr;
+	struct cpdma_desc __iomem	*prev = chan->tail;
+	struct cpdma_desc_pool		*pool = ctlr->pool;
+	dma_addr_t			desc_dma;
+	u32				mode;
+
+	desc_dma = desc_phys(pool, desc);
+
+	/* simple case - idle channel */
+	if (!chan->head) {
+		chan->stats.head_enqueue++;
+		chan->head = desc;
+		chan->tail = desc;
+		if (chan->state == CPDMA_STATE_ACTIVE)
+			chan_write(chan, hdp, desc_dma);
+		return;
+	}
+
+	/* first chain the descriptor at the tail of the list */
+	desc_write(prev, hw_next, desc_dma);
+	chan->tail = desc;
+	chan->stats.tail_enqueue++;
+
+	/* next check if EOQ has been triggered already */
+	mode = desc_read(prev, hw_mode);
+	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+	    (chan->state == CPDMA_STATE_ACTIVE)) {
+		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+		chan_write(chan, hdp, desc_dma);
+		chan->stats.misqueued++;
+	}
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+		      int len, int directed)
+{
+	struct cpdma_ctlr		*ctlr = chan->ctlr;
+	struct cpdma_desc __iomem	*desc;
+	dma_addr_t			buffer;
+	unsigned long			flags;
+	u32				mode;
+	int				ret = 0;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
+		ret = -EINVAL;
+		goto unlock_ret;
+	}
+
+	if (chan->count >= chan->desc_num)	{
+		chan->stats.desc_alloc_fail++;
+		ret = -ENOMEM;
+		goto unlock_ret;
+	}
+
+	desc = cpdma_desc_alloc(ctlr->pool);
+	if (!desc) {
+		chan->stats.desc_alloc_fail++;
+		ret = -ENOMEM;
+		goto unlock_ret;
+	}
+
+	if (len < ctlr->params.min_packet_size) {
+		len = ctlr->params.min_packet_size;
+		chan->stats.runt_transmit_buff++;
+	}
+
+	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+	ret = dma_mapping_error(ctlr->dev, buffer);
+	if (ret) {
+		cpdma_desc_free(ctlr->pool, desc, 1);
+		ret = -EINVAL;
+		goto unlock_ret;
+	}
+
+	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+	cpdma_desc_to_port(chan, mode, directed);
+
+	/* Relaxed IO accessors can be used here as there is a read barrier
+	 * at the end of the write sequence.
+	 */
+	writel_relaxed(0, &desc->hw_next);
+	writel_relaxed(buffer, &desc->hw_buffer);
+	writel_relaxed(len, &desc->hw_len);
+	writel_relaxed(mode | len, &desc->hw_mode);
+	writel_relaxed((uintptr_t)token, &desc->sw_token);
+	writel_relaxed(buffer, &desc->sw_buffer);
+	writel_relaxed(len, &desc->sw_len);
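+	/* the sw_len readback below is the read barrier referred to above */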
+	desc_read(desc, sw_len);
+
+	__cpdma_chan_submit(chan, desc);
+
+	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+		chan_write(chan, rxfree, 1);
+
+	chan->count++;
+
+unlock_ret:
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_submit);
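+
+/* Usage sketch (illustrative): queue an skb for transmission; the skb is
+ * passed back as the token to the channel handler on completion:
+ *
+ *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
+ *	if (ret)
+ *		dev_kfree_skb_any(skb);
+ */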
+
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr	*ctlr = chan->ctlr;
+	struct cpdma_desc_pool	*pool = ctlr->pool;
+	bool			free_tx_desc;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	free_tx_desc = (chan->count < chan->desc_num) &&
+			 gen_pool_avail(pool->gen_pool);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return free_tx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+			      struct cpdma_desc __iomem *desc,
+			      int outlen, int status)
+{
+	struct cpdma_ctlr		*ctlr = chan->ctlr;
+	struct cpdma_desc_pool		*pool = ctlr->pool;
+	dma_addr_t			buff_dma;
+	int				origlen;
+	uintptr_t			token;
+
+	token      = desc_read(desc, sw_token);
+	buff_dma   = desc_read(desc, sw_buffer);
+	origlen    = desc_read(desc, sw_len);
+
+	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+	cpdma_desc_free(pool, desc, 1);
+	(*chan->handler)((void *)token, outlen, status);
+}
+
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr		*ctlr = chan->ctlr;
+	struct cpdma_desc __iomem	*desc;
+	int				status, outlen;
+	int				cb_status = 0;
+	struct cpdma_desc_pool		*pool = ctlr->pool;
+	dma_addr_t			desc_dma;
+	unsigned long			flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = chan->head;
+	if (!desc) {
+		chan->stats.empty_dequeue++;
+		status = -ENOENT;
+		goto unlock_ret;
+	}
+	desc_dma = desc_phys(pool, desc);
+
+	status	= desc_read(desc, hw_mode);
+	outlen	= status & 0x7ff;
+	if (status & CPDMA_DESC_OWNER) {
+		chan->stats.busy_dequeue++;
+		status = -EBUSY;
+		goto unlock_ret;
+	}
+
+	if (status & CPDMA_DESC_PASS_CRC)
+		outlen -= CPDMA_DESC_CRC_LEN;
+
+	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+			    CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);
+
+	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+	chan_write(chan, cp, desc_dma);
+	chan->count--;
+	chan->stats.good_dequeue++;
+
+	if ((status & CPDMA_DESC_EOQ) && chan->head) {
+		chan->stats.requeue++;
+		chan_write(chan, hdp, desc_phys(pool, chan->head));
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
+		cb_status = -ENOSYS;
+	else
+		cb_status = status;
+
+	__cpdma_chan_free(chan, desc, outlen, cb_status);
+	return status;
+
+unlock_ret:
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+	int used = 0, ret = 0;
+
+	if (chan->state != CPDMA_STATE_ACTIVE)
+		return -EINVAL;
+
+	while (used < quota) {
+		ret = __cpdma_chan_process(chan);
+		if (ret < 0)
+			break;
+		used++;
+	}
+	return used;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_process);
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr *ctlr = chan->ctlr;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	ret = cpdma_chan_set_chan_shaper(chan);
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+	if (ret)
+		return ret;
+
+	ret = cpdma_chan_on(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_start);
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+	struct cpdma_ctlr	*ctlr = chan->ctlr;
+	struct cpdma_desc_pool	*pool = ctlr->pool;
+	unsigned long		flags;
+	int			ret;
+	unsigned		timeout;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	chan->state = CPDMA_STATE_TEARDOWN;
+	dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+	/* trigger teardown */
+	dma_reg_write(ctlr, chan->td, chan_linear(chan));
+
+	/* wait for teardown complete */
+	timeout = 100 * 100; /* 100 ms */
+	while (timeout) {
+		u32 cp = chan_read(chan, cp);
+		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+			break;
+		udelay(10);
+		timeout--;
+	}
+	WARN_ON(!timeout);
+	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+	/* handle completed packets */
+	spin_unlock_irqrestore(&chan->lock, flags);
+	do {
+		ret = __cpdma_chan_process(chan);
+		if (ret < 0)
+			break;
+	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* remaining packets haven't been tx/rx'ed, clean them up */
+	while (chan->head) {
+		struct cpdma_desc __iomem *desc = chan->head;
+		dma_addr_t next_dma;
+
+		next_dma = desc_read(desc, hw_next);
+		chan->head = desc_from_phys(pool, next_dma);
+		chan->count--;
+		chan->stats.teardown_dequeue++;
+
+		/* issue callback without locks held */
+		spin_unlock_irqrestore(&chan->lock, flags);
+		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
+		spin_lock_irqsave(&chan->lock, flags);
+	}
+
+	chan->state = CPDMA_STATE_IDLE;
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_stop);
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->state != CPDMA_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&chan->lock, flags);
+		return -EINVAL;
+	}
+
+	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+		      chan->mask);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	ret = _cpdma_control_get(ctlr, control);
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+
+	return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ctlr->lock, flags);
+	ret = _cpdma_control_set(ctlr, control, value);
+	spin_unlock_irqrestore(&ctlr->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_control_set);
+
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
+{
+	return ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
+
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
+{
+	return ctlr->num_tx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
+
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+{
+	ctlr->num_rx_desc = num_rx_desc;
+	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
new file mode 100644
index 0000000..d399af5
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -0,0 +1,124 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS	BITS_PER_LONG
+
+#define CPDMA_RX_SOURCE_PORT(__status__)	(((__status__) >> 16) & 0x7)
+
+#define CPDMA_RX_VLAN_ENCAP BIT(19)
+
+#define CPDMA_EOI_RX_THRESH	0x0
+#define CPDMA_EOI_RX		0x1
+#define CPDMA_EOI_TX		0x2
+#define CPDMA_EOI_MISC		0x3
+
+struct cpdma_params {
+	struct device		*dev;
+	void __iomem		*dmaregs;
+	void __iomem		*txhdp, *rxhdp, *txcp, *rxcp;
+	void __iomem		*rxthresh, *rxfree;
+	int			num_chan;
+	bool			has_soft_reset;
+	int			min_packet_size;
+	u32			desc_mem_phys;
+	u32			desc_hw_addr;
+	int			desc_mem_size;
+	int			desc_align;
+	u32			bus_freq_mhz;
+	u32			descs_pool_size;
+
+	/*
+	 * Some instances of embedded cpdma controllers have extra control and
+	 * status registers.  The following flag enables access to these
+	 * "extended" registers.
+	 */
+	bool			has_ext_regs;
+};
+
+struct cpdma_chan_stats {
+	u32			head_enqueue;
+	u32			tail_enqueue;
+	u32			pad_enqueue;
+	u32			misqueued;
+	u32			desc_alloc_fail;
+	u32			pad_alloc_fail;
+	u32			runt_receive_buff;
+	u32			runt_transmit_buff;
+	u32			empty_dequeue;
+	u32			busy_dequeue;
+	u32			good_dequeue;
+	u32			requeue;
+	u32			teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+				     cpdma_handler_fn handler, int rx_type);
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+			 struct cpdma_chan_stats *stats);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+		      int len, int directed);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
+int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight);
+int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate);
+u32 cpdma_chan_get_rate(struct cpdma_chan *ch);
+u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr);
+
+enum cpdma_control {
+	CPDMA_TX_RLIM,			/* read-write */
+	CPDMA_CMD_IDLE,			/* write-only */
+	CPDMA_COPY_ERROR_FRAMES,	/* read-write */
+	CPDMA_RX_OFF_LEN_UPDATE,	/* read-write */
+	CPDMA_RX_OWNERSHIP_FLIP,	/* read-write */
+	CPDMA_TX_PRIO_FIXED,		/* read-write */
+	CPDMA_STAT_IDLE,		/* read-only */
+	CPDMA_STAT_TX_ERR_CHAN,		/* read-only */
+	CPDMA_STAT_TX_ERR_CODE,		/* read-only */
+	CPDMA_STAT_RX_ERR_CHAN,		/* read-only */
+	CPDMA_STAT_RX_ERR_CODE,		/* read-only */
+	CPDMA_RX_BUFFER_OFFSET,		/* read-write */
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);
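+
+/* Typical call flow (illustrative sketch, not a normative sequence):
+ *
+ *	ctlr = cpdma_ctlr_create(&params);
+ *	rxch = cpdma_chan_create(ctlr, 0, rx_handler, 1);
+ *	txch = cpdma_chan_create(ctlr, 0, tx_handler, 0);
+ *	cpdma_ctlr_start(ctlr);
+ *	...
+ *	cpdma_ctlr_stop(ctlr);
+ *	cpdma_chan_destroy(rxch);
+ *	cpdma_chan_destroy(txch);
+ *	cpdma_ctlr_destroy(ctlr);
+ */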
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
new file mode 100644
index 0000000..f270bee
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -0,0 +1,2087 @@
+/*
+ * DaVinci Ethernet Medium Access Controller
+ *
+ * DaVinci EMAC is based upon CPPI 3.0 TI DMA engine
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ * History:
+ * 0-5 A number of folks worked on this driver in bits and pieces but the major
+ *     contribution came from Suraj Iyer and Anant Gole
+ * 6.0 Anant Gole - rewrote the driver as per Linux conventions
+ * 6.1 Chaithrika U S - added support for Gigabit and RMII features,
+ *     PHY layer usage
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/semaphore.h>
+#include <linux/phy.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+
+#include "cpsw.h"
+#include "davinci_cpdma.h"
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
+
+/* Netif debug messages possible */
+#define DAVINCI_EMAC_DEBUG	(NETIF_MSG_DRV | \
+				NETIF_MSG_PROBE | \
+				NETIF_MSG_LINK | \
+				NETIF_MSG_TIMER | \
+				NETIF_MSG_IFDOWN | \
+				NETIF_MSG_IFUP | \
+				NETIF_MSG_RX_ERR | \
+				NETIF_MSG_TX_ERR | \
+				NETIF_MSG_TX_QUEUED | \
+				NETIF_MSG_INTR | \
+				NETIF_MSG_TX_DONE | \
+				NETIF_MSG_RX_STATUS | \
+				NETIF_MSG_PKTDATA | \
+				NETIF_MSG_HW | \
+				NETIF_MSG_WOL)
+
+/* version info */
+#define EMAC_MAJOR_VERSION	6
+#define EMAC_MINOR_VERSION	1
+#define EMAC_MODULE_VERSION	"6.1"
+MODULE_VERSION(EMAC_MODULE_VERSION);
+static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+
+/* Configuration items */
+#define EMAC_DEF_PASS_CRC		(0) /* Do not pass CRC up to frames */
+#define EMAC_DEF_QOS_EN			(0) /* EMAC proprietary QoS disabled */
+#define EMAC_DEF_NO_BUFF_CHAIN		(0) /* No buffer chain */
+#define EMAC_DEF_MACCTRL_FRAME_EN	(0) /* Discard Maccontrol frames */
+#define EMAC_DEF_SHORT_FRAME_EN		(0) /* Discard short frames */
+#define EMAC_DEF_ERROR_FRAME_EN		(0) /* Discard error frames */
+#define EMAC_DEF_PROM_EN		(0) /* Promiscuous disabled */
+#define EMAC_DEF_PROM_CH		(0) /* Promiscuous channel is 0 */
+#define EMAC_DEF_BCAST_EN		(1) /* Broadcast enabled */
+#define EMAC_DEF_BCAST_CH		(0) /* Broadcast channel is 0 */
+#define EMAC_DEF_MCAST_EN		(1) /* Multicast enabled */
+#define EMAC_DEF_MCAST_CH		(0) /* Multicast channel is 0 */
+
+#define EMAC_DEF_TXPRIO_FIXED		(1) /* TX Priority is fixed */
+#define EMAC_DEF_TXPACING_EN		(0) /* TX pacing NOT supported*/
+
+#define EMAC_DEF_BUFFER_OFFSET		(0) /* Buffer offset to DMA (future) */
+#define EMAC_DEF_MIN_ETHPKTSIZE		(60) /* Minimum ethernet pkt size */
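+/* 1500 byte payload + 14 byte Ethernet header + 4 byte FCS + 4 byte VLAN tag */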
+#define EMAC_DEF_MAX_FRAME_SIZE		(1500 + 14 + 4 + 4)
+#define EMAC_DEF_TX_CH			(0) /* Default 0th channel */
+#define EMAC_DEF_RX_CH			(0) /* Default 0th channel */
+#define EMAC_DEF_RX_NUM_DESC		(128)
+#define EMAC_DEF_MAX_TX_CH		(1) /* Max TX channels configured */
+#define EMAC_DEF_MAX_RX_CH		(1) /* Max RX channels configured */
+#define EMAC_POLL_WEIGHT		(64) /* Default NAPI poll weight */
+
+/* Buffer descriptor parameters */
+#define EMAC_DEF_TX_MAX_SERVICE		(32) /* TX max service BD's */
+#define EMAC_DEF_RX_MAX_SERVICE		(64) /* should match NAPI poll weight */
+
+/* EMAC register related defines */
+#define EMAC_ALL_MULTI_REG_VALUE	(0xFFFFFFFF)
+#define EMAC_NUM_MULTICAST_BITS		(64)
+#define EMAC_TX_CONTROL_TX_ENABLE_VAL	(0x1)
+#define EMAC_RX_CONTROL_RX_ENABLE_VAL	(0x1)
+#define EMAC_MAC_HOST_ERR_INTMASK_VAL	(0x2)
+#define EMAC_RX_UNICAST_CLEAR_ALL	(0xFF)
+#define EMAC_INT_MASK_CLEAR		(0xFF)
+
+/* RX MBP register bit positions */
+#define EMAC_RXMBP_PASSCRC_MASK		BIT(30)
+#define EMAC_RXMBP_QOSEN_MASK		BIT(29)
+#define EMAC_RXMBP_NOCHAIN_MASK		BIT(28)
+#define EMAC_RXMBP_CMFEN_MASK		BIT(24)
+#define EMAC_RXMBP_CSFEN_MASK		BIT(23)
+#define EMAC_RXMBP_CEFEN_MASK		BIT(22)
+#define EMAC_RXMBP_CAFEN_MASK		BIT(21)
+#define EMAC_RXMBP_PROMCH_SHIFT		(16)
+#define EMAC_RXMBP_PROMCH_MASK		(0x7 << 16)
+#define EMAC_RXMBP_BROADEN_MASK		BIT(13)
+#define EMAC_RXMBP_BROADCH_SHIFT	(8)
+#define EMAC_RXMBP_BROADCH_MASK		(0x7 << 8)
+#define EMAC_RXMBP_MULTIEN_MASK		BIT(5)
+#define EMAC_RXMBP_MULTICH_SHIFT	(0)
+#define EMAC_RXMBP_MULTICH_MASK		(0x7)
+#define EMAC_RXMBP_CHMASK		(0x7)
+
+/* EMAC register definitions/bit maps used */
+# define EMAC_MBP_RXPROMISC		(0x00200000)
+# define EMAC_MBP_PROMISCCH(ch)		(((ch) & 0x7) << 16)
+# define EMAC_MBP_RXBCAST		(0x00002000)
+# define EMAC_MBP_BCASTCHAN(ch)		(((ch) & 0x7) << 8)
+# define EMAC_MBP_RXMCAST		(0x00000020)
+# define EMAC_MBP_MCASTCHAN(ch)		((ch) & 0x7)
+
+/* EMAC mac_control register */
+#define EMAC_MACCONTROL_TXPTYPE		BIT(9)
+#define EMAC_MACCONTROL_TXPACEEN	BIT(6)
+#define EMAC_MACCONTROL_GMIIEN		BIT(5)
+#define EMAC_MACCONTROL_GIGABITEN	BIT(7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN	BIT(0)
+#define EMAC_MACCONTROL_RMIISPEED_MASK	BIT(15)
+
+/* GIGABIT MODE related bits */
+#define EMAC_DM646X_MACCONTORL_GIG	BIT(7)
+#define EMAC_DM646X_MACCONTORL_GIGFORCE	BIT(17)
+
+/* EMAC mac_status register */
+#define EMAC_MACSTATUS_TXERRCODE_MASK	(0xF00000)
+#define EMAC_MACSTATUS_TXERRCODE_SHIFT	(20)
+#define EMAC_MACSTATUS_TXERRCH_MASK	(0x7)
+#define EMAC_MACSTATUS_TXERRCH_SHIFT	(16)
+#define EMAC_MACSTATUS_RXERRCODE_MASK	(0xF000)
+#define EMAC_MACSTATUS_RXERRCODE_SHIFT	(12)
+#define EMAC_MACSTATUS_RXERRCH_MASK	(0x7)
+#define EMAC_MACSTATUS_RXERRCH_SHIFT	(8)
+
+/* EMAC RX register masks */
+#define EMAC_RX_MAX_LEN_MASK		(0xFFFF)
+#define EMAC_RX_BUFFER_OFFSET_MASK	(0xFFFF)
+
+/* MAC_IN_VECTOR (0x180) register bit fields */
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT	BIT(17)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT	BIT(16)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC	BIT(8)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC	BIT(0)
+
+/* NOTE: For DM646x the IN_VECTOR has changed */
+#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC	BIT(EMAC_DEF_RX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC	BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT	BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT	BIT(27)
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT		BIT(31)
+#define EMAC_CPPI_EOP_BIT		BIT(30)
+#define EMAC_CPPI_OWNERSHIP_BIT		BIT(29)
+#define EMAC_CPPI_EOQ_BIT		BIT(28)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27)
+#define EMAC_CPPI_PASS_CRC_BIT		BIT(26)
+#define EMAC_RX_BD_BUF_SIZE		(0xFFFF)
+#define EMAC_BD_LENGTH_FOR_CACHE	(16) /* only CPPI bytes */
+#define EMAC_RX_BD_PKT_LENGTH_MASK	(0xFFFF)
+
+/* Max hardware defines */
+#define EMAC_MAX_TXRX_CHANNELS		 (8)  /* Max hardware channels */
+#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
+
+/* EMAC Peripheral Device Register Memory Layout structure */
+#define EMAC_MACINVECTOR	0x90
+
+#define EMAC_DM646X_MACEOIVECTOR	0x94
+
+#define EMAC_MACINTSTATRAW	0xB0
+#define EMAC_MACINTSTATMASKED	0xB4
+#define EMAC_MACINTMASKSET	0xB8
+#define EMAC_MACINTMASKCLEAR	0xBC
+
+#define EMAC_RXMBPENABLE	0x100
+#define EMAC_RXUNICASTSET	0x104
+#define EMAC_RXUNICASTCLEAR	0x108
+#define EMAC_RXMAXLEN		0x10C
+#define EMAC_RXBUFFEROFFSET	0x110
+#define EMAC_RXFILTERLOWTHRESH	0x114
+
+#define EMAC_MACCONTROL		0x160
+#define EMAC_MACSTATUS		0x164
+#define EMAC_EMCONTROL		0x168
+#define EMAC_FIFOCONTROL	0x16C
+#define EMAC_MACCONFIG		0x170
+#define EMAC_SOFTRESET		0x174
+#define EMAC_MACSRCADDRLO	0x1D0
+#define EMAC_MACSRCADDRHI	0x1D4
+#define EMAC_MACHASH1		0x1D8
+#define EMAC_MACHASH2		0x1DC
+#define EMAC_MACADDRLO		0x500
+#define EMAC_MACADDRHI		0x504
+#define EMAC_MACINDEX		0x508
+
+/* EMAC statistics registers */
+#define EMAC_RXGOODFRAMES	0x200
+#define EMAC_RXBCASTFRAMES	0x204
+#define EMAC_RXMCASTFRAMES	0x208
+#define EMAC_RXPAUSEFRAMES	0x20C
+#define EMAC_RXCRCERRORS	0x210
+#define EMAC_RXALIGNCODEERRORS	0x214
+#define EMAC_RXOVERSIZED	0x218
+#define EMAC_RXJABBER		0x21C
+#define EMAC_RXUNDERSIZED	0x220
+#define EMAC_RXFRAGMENTS	0x224
+#define EMAC_RXFILTERED		0x228
+#define EMAC_RXQOSFILTERED	0x22C
+#define EMAC_RXOCTETS		0x230
+#define EMAC_TXGOODFRAMES	0x234
+#define EMAC_TXBCASTFRAMES	0x238
+#define EMAC_TXMCASTFRAMES	0x23C
+#define EMAC_TXPAUSEFRAMES	0x240
+#define EMAC_TXDEFERRED		0x244
+#define EMAC_TXCOLLISION	0x248
+#define EMAC_TXSINGLECOLL	0x24C
+#define EMAC_TXMULTICOLL	0x250
+#define EMAC_TXEXCESSIVECOLL	0x254
+#define EMAC_TXLATECOLL		0x258
+#define EMAC_TXUNDERRUN		0x25C
+#define EMAC_TXCARRIERSENSE	0x260
+#define EMAC_TXOCTETS		0x264
+#define EMAC_NETOCTETS		0x280
+#define EMAC_RXSOFOVERRUNS	0x284
+#define EMAC_RXMOFOVERRUNS	0x288
+#define EMAC_RXDMAOVERRUNS	0x28C
+
+/* EMAC DM644x control registers */
+#define EMAC_CTRL_EWCTL		(0x4)
+#define EMAC_CTRL_EWINTTCNT	(0x8)
+
+/* EMAC DM644x control module masks */
+#define EMAC_DM644X_EWINTCNT_MASK	0x1FFFF
+#define EMAC_DM644X_INTMIN_INTVL	0x1
+#define EMAC_DM644X_INTMAX_INTVL	(EMAC_DM644X_EWINTCNT_MASK)
+
+/* EMAC DM646X control module registers */
+#define EMAC_DM646X_CMINTCTRL	0x0C
+#define EMAC_DM646X_CMRXINTEN	0x14
+#define EMAC_DM646X_CMTXINTEN	0x18
+#define EMAC_DM646X_CMRXINTMAX	0x70
+#define EMAC_DM646X_CMTXINTMAX	0x74
+
+/* EMAC DM646X control module masks */
+#define EMAC_DM646X_INTPACEEN		(0x3 << 16)
+#define EMAC_DM646X_INTPRESCALE_MASK	(0x7FF << 0)
+#define EMAC_DM646X_CMINTMAX_CNT	63
+#define EMAC_DM646X_CMINTMIN_CNT	2
+#define EMAC_DM646X_CMINTMAX_INTVL	(1000 / EMAC_DM646X_CMINTMIN_CNT)
+#define EMAC_DM646X_CMINTMIN_INTVL	((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
+
+
+/* EMAC EOI codes for C0 */
+#define EMAC_DM646X_MAC_EOI_C0_RXEN	(0x01)
+#define EMAC_DM646X_MAC_EOI_C0_TXEN	(0x02)
+
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK    (0xFFFFFFFF)
+
+/* emac_priv: EMAC adapter private data structure */
+struct emac_priv {
+	u32 msg_enable;
+	struct net_device *ndev;
+	struct platform_device *pdev;
+	struct napi_struct napi;
+	char mac_addr[6];
+	void __iomem *remap_addr;
+	u32 emac_base_phys;
+	void __iomem *emac_base;
+	void __iomem *ctrl_base;
+	struct cpdma_ctlr *dma;
+	struct cpdma_chan *txchan;
+	struct cpdma_chan *rxchan;
+	u32 link; /* 1=link on, 0=link off */
+	u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
+	u32 duplex; /* Link duplex: 0=Half, 1=Full */
+	u32 rx_buf_size;
+	u32 isr_count;
+	u32 coal_intvl;
+	u32 bus_freq_mhz;
+	u8 rmii_en;
+	u8 version;
+	u32 mac_hash1;
+	u32 mac_hash2;
+	u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+	u32 rx_addr_type;
+	const char *phy_id;
+	struct device_node *phy_node;
+	spinlock_t lock;
+	/* platform specific members */
+	void (*int_enable) (void);
+	void (*int_disable) (void);
+};
+
+/* EMAC TX Host Error description strings */
+static char *emac_txhost_errcodes[16] = {
+	"No error", "SOP error", "Ownership bit not set in SOP buffer",
+	"Zero Next Buffer Descriptor Pointer Without EOP",
+	"Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
+	"Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+	"Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* EMAC RX Host Error description strings */
+static char *emac_rxhost_errcodes[16] = {
+	"No error", "Reserved", "Ownership bit not set in input buffer",
+	"Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
+	"Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+	"Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* Helper macros */
+#define emac_read(reg)		  ioread32(priv->emac_base + (reg))
+#define emac_write(reg, val)      iowrite32(val, priv->emac_base + (reg))
+
+#define emac_ctrl_read(reg)	  ioread32((priv->ctrl_base + (reg)))
+#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
+
+/**
+ * emac_get_drvinfo - Get EMAC driver information
+ * @ndev: The DaVinci EMAC network adapter
+ * @info: ethtool info structure containing name and version
+ *
+ * Returns EMAC driver information (name and version)
+ *
+ */
+static void emac_get_drvinfo(struct net_device *ndev,
+			     struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, emac_version_string, sizeof(info->driver));
+	strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
+}
+
+/**
+ * emac_get_coalesce - Get interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ *
+ * Fetch the current interrupt coalesce settings
+ *
+ */
+static int emac_get_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	coal->rx_coalesce_usecs = priv->coal_intvl;
+	return 0;
+}
+
+/**
+ * emac_set_coalesce - Set interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ *
+ * Set interrupt coalesce parameters
+ *
+ */
+static int emac_set_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 int_ctrl, num_interrupts = 0;
+	u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
+
+	if (!coal->rx_coalesce_usecs)
+		return -EINVAL;
+
+	coal_intvl = coal->rx_coalesce_usecs;
+
+	switch (priv->version) {
+	case EMAC_VERSION_2:
+		int_ctrl =  emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
+		prescale = priv->bus_freq_mhz * 4;
+
+		if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
+			coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
+
+		if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
+			/*
+			 * Interrupt pacer works with 4us Pulse, we can
+			 * throttle further by dilating the 4us pulse.
+			 */
+			addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
+
+			if (addnl_dvdr > 1) {
+				prescale *= addnl_dvdr;
+				if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
+							* addnl_dvdr))
+					coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
+							* addnl_dvdr);
+			} else {
+				addnl_dvdr = 1;
+				coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
+			}
+		}
+
+		num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+
+		int_ctrl |= EMAC_DM646X_INTPACEEN;
+		int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
+		int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
+		emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
+
+		emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
+		emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
+
+		break;
+	default:
+		int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
+		int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
+		prescale = coal_intvl * priv->bus_freq_mhz;
+		if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
+			prescale = EMAC_DM644X_EWINTCNT_MASK;
+			coal_intvl = prescale / priv->bus_freq_mhz;
+		}
+		emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
+
+		break;
+	}
+
+	printk(KERN_INFO "Set coalesce to %d usecs.\n", coal_intvl);
+	priv->coal_intvl = coal_intvl;
+
+	return 0;
+}
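+
+/* Worked example (hypothetical numbers, EMAC_VERSION_2 path): with
+ * bus_freq_mhz = 76 the 4-us pacer prescale is 76 * 4 = 304; a requested
+ * rx_coalesce_usecs of 500 stays within EMAC_DM646X_CMINTMAX_INTVL, so
+ * addnl_dvdr remains 1 and num_interrupts = (1000 * 1) / 500 = 2 is
+ * programmed into CMRXINTMAX/CMTXINTMAX.
+ */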
+
+/* ethtool_ops: DaVinci EMAC Ethtool structure
+ *
+ * Ethtool support for EMAC adapter
+ */
+static const struct ethtool_ops ethtool_ops = {
+	.get_drvinfo = emac_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_coalesce = emac_get_coalesce,
+	.set_coalesce = emac_set_coalesce,
+	.get_ts_info = ethtool_op_get_ts_info,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+/**
+ * emac_update_phystatus - Update Phy status
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Updates phy status and takes action for network queue if required
+ * based upon link status
+ *
+ */
+static void emac_update_phystatus(struct emac_priv *priv)
+{
+	u32 mac_control;
+	u32 new_duplex;
+	u32 cur_duplex;
+	struct net_device *ndev = priv->ndev;
+
+	mac_control = emac_read(EMAC_MACCONTROL);
+	cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+	if (ndev->phydev)
+		new_duplex = ndev->phydev->duplex;
+	else
+		new_duplex = DUPLEX_FULL;
+
+	/* We get called only if link has changed (speed/duplex/status) */
+	if ((priv->link) && (new_duplex != cur_duplex)) {
+		priv->duplex = new_duplex;
+		if (DUPLEX_FULL == priv->duplex)
+			mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
+		else
+			mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
+	}
+
+	if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
+		mac_control = emac_read(EMAC_MACCONTROL);
+		mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
+				EMAC_DM646X_MACCONTORL_GIGFORCE);
+	} else {
+		/* Clear the GIG bit and GIGFORCE bit */
+		mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
+					EMAC_DM646X_MACCONTORL_GIG);
+
+		if (priv->rmii_en && (priv->speed == SPEED_100))
+			mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
+		else
+			mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
+	}
+
+	/* Update mac_control if changed */
+	emac_write(EMAC_MACCONTROL, mac_control);
+
+	if (priv->link) {
+		/* link ON */
+		if (!netif_carrier_ok(ndev))
+			netif_carrier_on(ndev);
+		/* reactivate the transmit queue if it is stopped */
+		if (netif_running(ndev) && netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	} else {
+		/* link OFF */
+		if (netif_carrier_ok(ndev))
+			netif_carrier_off(ndev);
+		if (!netif_queue_stopped(ndev))
+			netif_stop_queue(ndev);
+	}
+}
+
+/**
+ * hash_get - Calculate hash value from mac address
+ * @addr: mac address to calculate the hash for
+ *
+ * Calculates hash value from mac address
+ *
+ */
+static u32 hash_get(u8 *addr)
+{
+	u32 hash = 0;
+	u8 tmpval;
+	int cnt;
+
+	for (cnt = 0; cnt < 2; cnt++) {
+		tmpval = *addr++;
+		hash ^= (tmpval >> 2) ^ (tmpval << 4);
+		tmpval = *addr++;
+		hash ^= (tmpval >> 4) ^ (tmpval << 2);
+		tmpval = *addr++;
+		hash ^= (tmpval >> 6) ^ (tmpval);
+	}
+
+	return hash & 0x3F;
+}
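+
+/* The hash folds all six bytes of the MAC address into a 6-bit index
+ * (0..63); the index selects a single bit across the MACHASH1/MACHASH2
+ * register pair.
+ */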
+
+/**
+ * emac_hash_add - Hash function to add a mac addr to the hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to add to the hash table
+ *
+ * Adds mac address to the internal hash table
+ *
+ */
+static int emac_hash_add(struct emac_priv *priv, u8 *mac_addr)
+{
+	struct device *emac_dev = &priv->ndev->dev;
+	u32 rc = 0;
+	u32 hash_bit;
+	u32 hash_value = hash_get(mac_addr);
+
+	if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
+		if (netif_msg_drv(priv)) {
+			dev_err(emac_dev, "DaVinci EMAC: emac_hash_add(): Invalid "\
+				"Hash %08x, should not be greater than %08x",
+				hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
+		}
+		return -1;
+	}
+
+	/* set the hash bit only if not previously set */
+	if (priv->multicast_hash_cnt[hash_value] == 0) {
+		rc = 1; /* hash value changed */
+		if (hash_value < 32) {
+			hash_bit = BIT(hash_value);
+			priv->mac_hash1 |= hash_bit;
+		} else {
+			hash_bit = BIT((hash_value - 32));
+			priv->mac_hash2 |= hash_bit;
+		}
+	}
+
+	/* incr counter for num of mcast addr's mapped to "this" hash bit */
+	++priv->multicast_hash_cnt[hash_value];
+
+	return rc;
+}
+
+/**
+ * emac_hash_del - Hash function to delete mac addr from hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to delete from hash table
+ *
+ * Removes mac address from the internal hash table
+ *
+ */
+static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr)
+{
+	u32 hash_value;
+	u32 hash_bit;
+
+	hash_value = hash_get(mac_addr);
+	if (priv->multicast_hash_cnt[hash_value] > 0) {
+		/* dec cntr for num of mcast addr's mapped to this hash bit */
+		--priv->multicast_hash_cnt[hash_value];
+	}
+
+	/* if counter still > 0, at least one multicast address refers
+	 * to this hash bit. so return 0 */
+	if (priv->multicast_hash_cnt[hash_value] > 0)
+		return 0;
+
+	if (hash_value < 32) {
+		hash_bit = BIT(hash_value);
+		priv->mac_hash1 &= ~hash_bit;
+	} else {
+		hash_bit = BIT((hash_value - 32));
+		priv->mac_hash2 &= ~hash_bit;
+	}
+
+	/* return 1 to indicate change in mac_hash registers reqd */
+	return 1;
+}
+
+/* EMAC multicast operation */
+#define EMAC_MULTICAST_ADD	0
+#define EMAC_MULTICAST_DEL	1
+#define EMAC_ALL_MULTI_SET	2
+#define EMAC_ALL_MULTI_CLR	3
+
+/**
+ * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @action: multicast operation to perform
+ * @mac_addr: mac address to set
+ *
+ * Set multicast addresses in EMAC adapter - internal function
+ *
+ */
+static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
+{
+	struct device *emac_dev = &priv->ndev->dev;
+	int update = -1;
+
+	switch (action) {
+	case EMAC_MULTICAST_ADD:
+		update = emac_hash_add(priv, mac_addr);
+		break;
+	case EMAC_MULTICAST_DEL:
+		update = emac_hash_del(priv, mac_addr);
+		break;
+	case EMAC_ALL_MULTI_SET:
+		update = 1;
+		priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE;
+		priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE;
+		break;
+	case EMAC_ALL_MULTI_CLR:
+		update = 1;
+		priv->mac_hash1 = 0;
+		priv->mac_hash2 = 0;
+		memset(&(priv->multicast_hash_cnt[0]), 0,
+		sizeof(priv->multicast_hash_cnt[0]) *
+		       EMAC_NUM_MULTICAST_BITS);
+		break;
+	default:
+		if (netif_msg_drv(priv))
+			dev_err(emac_dev, "DaVinci EMAC: add_mcast"\
+				": bad operation %d", action);
+		break;
+	}
+
+	/* write to the hardware only if the register status changes */
+	if (update > 0) {
+		emac_write(EMAC_MACHASH1, priv->mac_hash1);
+		emac_write(EMAC_MACHASH2, priv->mac_hash2);
+	}
+}
+
+/**
+ * emac_dev_mcast_set - Set multicast address in the EMAC adapter
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Set multicast addresses in EMAC adapter
+ *
+ */
+static void emac_dev_mcast_set(struct net_device *ndev)
+{
+	u32 mbp_enable;
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	mbp_enable = emac_read(EMAC_RXMBPENABLE);
+	if (ndev->flags & IFF_PROMISC) {
+		mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH));
+		mbp_enable |= (EMAC_MBP_RXPROMISC);
+	} else {
+		mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
+		if ((ndev->flags & IFF_ALLMULTI) ||
+		    netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+			mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+			emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+		} else if (!netdev_mc_empty(ndev)) {
+			struct netdev_hw_addr *ha;
+
+			mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+			emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+			/* program multicast address list into EMAC hardware */
+			netdev_for_each_mc_addr(ha, ndev) {
+				emac_add_mcast(priv, EMAC_MULTICAST_ADD,
+					       (u8 *) ha->addr);
+			}
+		} else {
+			mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
+			emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+		}
+	}
+	/* Set mbp config register */
+	emac_write(EMAC_RXMBPENABLE, mbp_enable);
+}
+
+/*************************************************************************
+ *  EMAC Hardware manipulation
+ *************************************************************************/
+
+/**
+ * emac_int_disable - Disable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Disable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_disable(struct emac_priv *priv)
+{
+	if (priv->version == EMAC_VERSION_2) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		/* Program C0_Int_En to zero to turn off
+		 * interrupts to the CPU
+		 */
+		emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
+		emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
+		/* NOTE: Rx Threshold and Misc interrupts are not disabled */
+		if (priv->int_disable)
+			priv->int_disable();
+
+		/* NOTE: Rx Threshold and Misc interrupts are likewise left untouched */
+
+		/* ack rxen; only then will a new pulse be generated */
+		emac_write(EMAC_DM646X_MACEOIVECTOR,
+			EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+		/* ack txen; only then will a new pulse be generated */
+		emac_write(EMAC_DM646X_MACEOIVECTOR,
+			EMAC_DM646X_MAC_EOI_C0_TXEN);
+
+		local_irq_restore(flags);
+
+	} else {
+		/* Set DM644x control registers for interrupt control */
+		emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
+	}
+}
+
+/**
+ * emac_int_enable - Enable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_enable(struct emac_priv *priv)
+{
+	if (priv->version == EMAC_VERSION_2) {
+		if (priv->int_enable)
+			priv->int_enable();
+
+		emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
+		emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
+
+		/* In addition to turning on the interrupt enable, we need to
+		 * ack by writing appropriate values to the EOI register
+		 */
+
+		/* NOTE: Rx Threshold and Misc interrupts are not enabled */
+	} else {
+		/* Set DM644x control registers for interrupt control */
+		emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
+	}
+}
+
+/**
+ * emac_irq - EMAC interrupt handler
+ * @irq: interrupt number
+ * @dev_id: EMAC network adapter data structure ptr
+ *
+ * EMAC interrupt handler - we only schedule NAPI here and do not process any
+ * packets. Even the interrupt status (TX/RX/Err) is checked in the NAPI poll
+ * function
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_irq(int irq, void *dev_id)
+{
+	struct net_device *ndev = (struct net_device *)dev_id;
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	++priv->isr_count;
+	if (likely(netif_running(priv->ndev))) {
+		emac_int_disable(priv);
+		napi_schedule(&priv->napi);
+	} else {
+		/* we are closing down, so don't process anything */
+	}
+	return IRQ_HANDLED;
+}
+
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
+{
+	struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
+	if (WARN_ON(!skb))
+		return NULL;
+	skb_reserve(skb, NET_IP_ALIGN);
+	return skb;
+}
+
+static void emac_rx_handler(void *token, int len, int status)
+{
+	struct sk_buff		*skb = token;
+	struct net_device	*ndev = skb->dev;
+	struct emac_priv	*priv = netdev_priv(ndev);
+	struct device		*emac_dev = &ndev->dev;
+	int			ret;
+
+	/* free and bail if we are shutting down */
+	if (unlikely(!netif_running(ndev))) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* recycle on receive error */
+	if (status < 0) {
+		ndev->stats.rx_errors++;
+		goto recycle;
+	}
+
+	/* feed received packet up the stack */
+	skb_put(skb, len);
+	skb->protocol = eth_type_trans(skb, ndev);
+	netif_receive_skb(skb);
+	ndev->stats.rx_bytes += len;
+	ndev->stats.rx_packets++;
+
+	/* alloc a new packet for receive */
+	skb = emac_rx_alloc(priv);
+	if (!skb) {
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "failed rx buffer alloc\n");
+		return;
+	}
+
+recycle:
+	ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+			skb_tailroom(skb), 0);
+
+	WARN_ON(ret == -ENOMEM);
+	if (unlikely(ret < 0))
+		dev_kfree_skb_any(skb);
+}
+
+static void emac_tx_handler(void *token, int len, int status)
+{
+	struct sk_buff		*skb = token;
+	struct net_device	*ndev = skb->dev;
+
+	/* Check whether the queue is stopped due to stalled TX DMA; if so,
+	 * wake the queue, as we now have a free descriptor for TX
+	 */
+	if (unlikely(netif_queue_stopped(ndev)))
+		netif_wake_queue(ndev);
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += len;
+	dev_kfree_skb_any(skb);
+}
+
+/**
+ * emac_dev_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in the
+ * EMAC hardware transmit queue
+ *
+ * Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
+ */
+static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct device *emac_dev = &ndev->dev;
+	int ret_code;
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	/* If no link, return */
+	if (unlikely(!priv->link)) {
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
+		goto fail_tx;
+	}
+
+	ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+	if (unlikely(ret_code < 0)) {
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+		goto fail_tx;
+	}
+
+	skb_tx_timestamp(skb);
+
+	ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+				     0);
+	if (unlikely(ret_code != 0)) {
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+		goto fail_tx;
+	}
+
+	/* If there is no more tx desc left free then we need to
+	 * tell the kernel to stop sending us tx frames.
+	 */
+	if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
+		netif_stop_queue(ndev);
+
+	return NETDEV_TX_OK;
+
+fail_tx:
+	ndev->stats.tx_dropped++;
+	netif_stop_queue(ndev);
+	return NETDEV_TX_BUSY;
+}
+
+/**
+ * emac_dev_tx_timeout - EMAC Transmit timeout function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system detects that an skb timeout period has expired,
+ * potentially due to a fault that prevents the adapter from sending it out
+ * on the wire. We tear down the TX channel assuming a hardware error and
+ * re-initialize the TX channel for hardware operation
+ *
+ */
+static void emac_dev_tx_timeout(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device *emac_dev = &ndev->dev;
+
+	if (netif_msg_tx_err(priv))
+		dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+
+	ndev->stats.tx_errors++;
+	emac_int_disable(priv);
+	cpdma_chan_stop(priv->txchan);
+	cpdma_chan_start(priv->txchan);
+	emac_int_enable(priv);
+}
+
+/**
+ * emac_set_type0addr - Set EMAC Type0 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type0 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+	u32 val;
+	val = ((mac_addr[5] << 8) | (mac_addr[4]));
+	emac_write(EMAC_MACSRCADDRLO, val);
+
+	val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+	       (mac_addr[1] << 8) | (mac_addr[0]));
+	emac_write(EMAC_MACSRCADDRHI, val);
+	val = emac_read(EMAC_RXUNICASTSET);
+	val |= BIT(ch);
+	emac_write(EMAC_RXUNICASTSET, val);
+	val = emac_read(EMAC_RXUNICASTCLEAR);
+	val &= ~BIT(ch);
+	emac_write(EMAC_RXUNICASTCLEAR, val);
+}
+
+/**
+ * emac_set_type1addr - Set EMAC Type1 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type1 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+	u32 val;
+	emac_write(EMAC_MACINDEX, ch);
+	val = ((mac_addr[5] << 8) | mac_addr[4]);
+	emac_write(EMAC_MACADDRLO, val);
+	val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+	       (mac_addr[1] << 8) | (mac_addr[0]));
+	emac_write(EMAC_MACADDRHI, val);
+	emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_set_type2addr - Set EMAC Type2 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ * @index: index into RX address entries
+ * @match: match parameter for RX address matching logic
+ *
+ * Called internally to set Type2 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
+			       char *mac_addr, int index, int match)
+{
+	u32 val;
+	emac_write(EMAC_MACINDEX, index);
+	val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+	       (mac_addr[1] << 8) | (mac_addr[0]));
+	emac_write(EMAC_MACADDRHI, val);
+	val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \
+	       (match << 19) | BIT(20));
+	emac_write(EMAC_MACADDRLO, val);
+	emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_setmac - Set mac address in the adapter (internal function)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+	struct device *emac_dev = &priv->ndev->dev;
+
+	if (priv->rx_addr_type == 0) {
+		emac_set_type0addr(priv, ch, mac_addr);
+	} else if (priv->rx_addr_type == 1) {
+		u32 cnt;
+		for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
+			emac_set_type1addr(priv, ch, mac_addr);
+	} else if (priv->rx_addr_type == 2) {
+		emac_set_type2addr(priv, ch, mac_addr, ch, 1);
+		emac_set_type0addr(priv, ch, mac_addr);
+	} else {
+		if (netif_msg_drv(priv))
+			dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n");
+	}
+}
+
+/**
+ * emac_dev_setmac_addr - Set mac address in the adapter
+ * @ndev: The DaVinci EMAC network adapter
+ * @addr: MAC address to set in device
+ *
+ * Called by the system to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device *emac_dev = &priv->ndev->dev;
+	struct sockaddr *sa = addr;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* Store mac addr in priv and rx channel and set it in EMAC hw */
+	memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+	memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+
+	/* MAC address is configured only after the interface is enabled. */
+	if (netif_running(ndev))
+		emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+
+	if (netif_msg_drv(priv))
+		dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
+					priv->mac_addr);
+
+	return 0;
+}
+
+/**
+ * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enables EMAC hardware for packet processing - enables PHY, enables RX
+ * for packet reception and enables device interrupts and then NAPI
+ *
+ * Returns success (0) or appropriate error code (none right now)
+ */
+static int emac_hw_enable(struct emac_priv *priv)
+{
+	u32 val, mbp_enable, mac_control;
+
+	/* Soft reset */
+	emac_write(EMAC_SOFTRESET, 1);
+	while (emac_read(EMAC_SOFTRESET))
+		cpu_relax();
+
+	/* Disable interrupt & Set pacing for more interrupts initially */
+	emac_int_disable(priv);
+
+	/* Full duplex enable bit set when auto negotiation happens */
+	mac_control =
+		(((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
+		((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
+		((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
+		((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
+	emac_write(EMAC_MACCONTROL, mac_control);
+
+	mbp_enable =
+		(((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
+		((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
+		 ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
+		 ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
+		 ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \
+			EMAC_RXMBP_PROMCH_SHIFT) |
+		 ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
+		 ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \
+			EMAC_RXMBP_BROADCH_SHIFT) |
+		 ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
+		 ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \
+			EMAC_RXMBP_MULTICH_SHIFT));
+	emac_write(EMAC_RXMBPENABLE, mbp_enable);
+	emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
+				   EMAC_RX_MAX_LEN_MASK));
+	emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
+					 EMAC_RX_BUFFER_OFFSET_MASK));
+	emac_write(EMAC_RXFILTERLOWTHRESH, 0);
+	emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
+	priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
+
+	emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
+
+	emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+
+	/* Enable MII */
+	val = emac_read(EMAC_MACCONTROL);
+	val |= (EMAC_MACCONTROL_GMIIEN);
+	emac_write(EMAC_MACCONTROL, val);
+
+	/* Enable NAPI and interrupts */
+	napi_enable(&priv->napi);
+	emac_int_enable(priv);
+	return 0;
+}
+
+/**
+ * emac_poll - EMAC NAPI Poll function
+ * @napi: NAPI structure embedded in the EMAC private adapter structure
+ * @budget: Number of receive packets to process (as told by NAPI layer)
+ *
+ * NAPI Poll function implemented to process packets as per budget. We check
+ * the type of interrupt on the device and accordingly call the TX or RX
+ * packet processing functions. We follow the budget for RX processing and
+ * also put a cap on number of TX pkts processed through config param. The
+ * NAPI schedule function is called if more packets pending.
+ *
+ * Returns number of packets received (in most cases; else TX pkts - rarely)
+ */
+static int emac_poll(struct napi_struct *napi, int budget)
+{
+	unsigned int mask;
+	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+	struct net_device *ndev = priv->ndev;
+	struct device *emac_dev = &ndev->dev;
+	u32 status = 0;
+	u32 num_tx_pkts = 0, num_rx_pkts = 0;
+
+	/* Check interrupt vectors and call packet processing */
+	status = emac_read(EMAC_MACINVECTOR);
+
+	mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
+
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
+
+	if (status & mask) {
+		num_tx_pkts = cpdma_chan_process(priv->txchan,
+					      EMAC_DEF_TX_MAX_SERVICE);
+	} /* TX processing */
+
+	mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
+
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
+
+	if (status & mask) {
+		num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
+	} /* RX processing */
+
+	mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+	if (unlikely(status & mask)) {
+		u32 ch, cause;
+
+		dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
+		netif_stop_queue(ndev);
+		napi_disable(&priv->napi);
+
+		status = emac_read(EMAC_MACSTATUS);
+		cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
+			 EMAC_MACSTATUS_TXERRCODE_SHIFT);
+		if (cause) {
+			ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
+			      EMAC_MACSTATUS_TXERRCH_SHIFT);
+			if (net_ratelimit()) {
+				dev_err(emac_dev, "TX Host error %s on ch=%d\n",
+					&emac_txhost_errcodes[cause][0], ch);
+			}
+		}
+		cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
+			 EMAC_MACSTATUS_RXERRCODE_SHIFT);
+		if (cause) {
+			ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
+			      EMAC_MACSTATUS_RXERRCH_SHIFT);
+			if (netif_msg_hw(priv) && net_ratelimit())
+				dev_err(emac_dev, "RX Host error %s on ch=%d\n",
+					&emac_rxhost_errcodes[cause][0], ch);
+		}
+	} else if (num_rx_pkts < budget) {
+		napi_complete_done(napi, num_rx_pkts);
+		emac_int_enable(priv);
+	}
+
+	return num_rx_pkts;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * emac_poll_controller - EMAC Poll controller function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ */
+static void emac_poll_controller(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	emac_int_disable(priv);
+	emac_irq(ndev->irq, ndev);
+	emac_int_enable(priv);
+}
+#endif
+
+static void emac_adjust_link(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	unsigned long flags;
+	int new_state = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (phydev->link) {
+		/* check the mode of operation - full/half duplex */
+		if (phydev->duplex != priv->duplex) {
+			new_state = 1;
+			priv->duplex = phydev->duplex;
+		}
+		if (phydev->speed != priv->speed) {
+			new_state = 1;
+			priv->speed = phydev->speed;
+		}
+		if (!priv->link) {
+			new_state = 1;
+			priv->link = 1;
+		}
+
+	} else if (priv->link) {
+		new_state = 1;
+		priv->link = 0;
+		priv->speed = 0;
+		priv->duplex = ~0;
+	}
+	if (new_state) {
+		emac_update_phystatus(priv);
+		phy_print_status(ndev->phydev);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*************************************************************************
+ *  Linux Driver Model
+ *************************************************************************/
+
+/**
+ * emac_devioctl - EMAC adapter ioctl
+ * @ndev: The DaVinci EMAC network adapter
+ * @ifrq: request parameter
+ * @cmd: command parameter
+ *
+ * EMAC driver ioctl function
+ *
+ * Returns success (0) or an appropriate error code
+ */
+static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+{
+	if (!(netif_running(ndev)))
+		return -EINVAL;
+
+	/* TODO: Add phy read and write and private statistics get feature */
+
+	if (ndev->phydev)
+		return phy_mii_ioctl(ndev->phydev, ifrq, cmd);
+	else
+		return -EOPNOTSUPP;
+}
+
+static int match_first_device(struct device *dev, void *data)
+{
+	if (dev->parent && dev->parent->of_node)
+		return of_device_is_compatible(dev->parent->of_node,
+					       "ti,davinci_mdio");
+
+	return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
+/**
+ * emac_dev_open - EMAC device open
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to start the interface. We initialize the
+ * TX/RX channels, enable the hardware for packet reception/transmission,
+ * and start the network queue.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_dev_open(struct net_device *ndev)
+{
+	struct device *emac_dev = &ndev->dev;
+	u32 cnt;
+	struct resource *res;
+	int q, m, ret;
+	int res_num = 0, irq_num = 0;
+	int i = 0;
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct phy_device *phydev = NULL;
+	struct device *phy = NULL;
+
+	ret = pm_runtime_get_sync(&priv->pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&priv->pdev->dev);
+		dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	netif_carrier_off(ndev);
+	for (cnt = 0; cnt < ETH_ALEN; cnt++)
+		ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+
+	/* Configuration items */
+	priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
+
+	priv->mac_hash1 = 0;
+	priv->mac_hash2 = 0;
+	emac_write(EMAC_MACHASH1, 0);
+	emac_write(EMAC_MACHASH2, 0);
+
+	for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+		struct sk_buff *skb = emac_rx_alloc(priv);
+
+		if (!skb)
+			break;
+
+		ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+					skb_tailroom(skb), 0);
+		if (WARN_ON(ret < 0))
+			break;
+	}
+
+	/* Request IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+					    res_num))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+			if (request_irq(irq_num, emac_irq, 0, ndev->name,
+					ndev)) {
+				dev_err(emac_dev,
+					"DaVinci EMAC: request_irq() failed\n");
+				ret = -EBUSY;
+
+				goto rollback;
+			}
+		}
+		res_num++;
+	}
+	/* prepare counters for rollback in case of an error */
+	res_num--;
+	irq_num--;
+
+	/* Start/Enable EMAC hardware */
+	emac_hw_enable(priv);
+
+	/* Enable Interrupt pacing if configured */
+	if (priv->coal_intvl != 0) {
+		struct ethtool_coalesce coal;
+
+		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+		emac_set_coalesce(ndev, &coal);
+	}
+
+	cpdma_ctlr_start(priv->dma);
+
+	if (priv->phy_node) {
+		phydev = of_phy_connect(ndev, priv->phy_node,
+					&emac_adjust_link, 0, 0);
+		if (!phydev) {
+			dev_err(emac_dev, "could not connect to phy %pOF\n",
+				priv->phy_node);
+			ret = -ENODEV;
+			goto err;
+		}
+	}
+
+	/* use the first phy on the bus if pdata did not give us a phy id */
+	if (!phydev && !priv->phy_id) {
+		/* NOTE: we can't use bus_find_device_by_name() here because
+		 * the device name is not guaranteed to be 'davinci_mdio'. On
+		 * some systems it can be 'davinci_mdio.0' so we need to use
+		 * strncmp() against the first part of the string to correctly
+		 * match it.
+		 */
+		phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+				      match_first_device);
+		if (phy) {
+			priv->phy_id = dev_name(phy);
+			if (!priv->phy_id || !*priv->phy_id)
+				put_device(phy);
+		}
+	}
+
+	if (!phydev && priv->phy_id && *priv->phy_id) {
+		phydev = phy_connect(ndev, priv->phy_id,
+				     &emac_adjust_link,
+				     PHY_INTERFACE_MODE_MII);
+		put_device(phy);	/* reference taken by bus_find_device */
+		if (IS_ERR(phydev)) {
+			dev_err(emac_dev, "could not connect to phy %s\n",
+				priv->phy_id);
+			ret = PTR_ERR(phydev);
+			goto err;
+		}
+
+		priv->link = 0;
+		priv->speed = 0;
+		priv->duplex = ~0;
+
+		phy_attached_info(phydev);
+	}
+
+	if (!phydev) {
+		/* No PHY: fix the link, speed and duplex settings */
+		dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
+		priv->link = 1;
+		priv->speed = SPEED_100;
+		priv->duplex = DUPLEX_FULL;
+		emac_update_phystatus(priv);
+	}
+
+	if (netif_msg_drv(priv))
+		dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
+
+	if (phydev)
+		phy_start(phydev);
+
+	return 0;
+
+err:
+	emac_int_disable(priv);
+	napi_disable(&priv->napi);
+
+rollback:
+	for (q = res_num; q >= 0; q--) {
+		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+		/* at the first iteration, irq_num is already set to the
+		 * right value
+		 */
+		if (q != res_num)
+			irq_num = res->end;
+
+		for (m = irq_num; m >= res->start; m--)
+			free_irq(m, ndev);
+	}
+	cpdma_ctlr_stop(priv->dma);
+	pm_runtime_put(&priv->pdev->dev);
+	return ret;
+}
+
+/**
+ * emac_dev_stop - EMAC device stop
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to stop or bring down the interface. We stop
+ * the network queue, disable interrupts and clean up the TX/RX channels.
+ *
+ * Returns success (0)
+ */
+static int emac_dev_stop(struct net_device *ndev)
+{
+	struct resource *res;
+	int i = 0;
+	int irq_num;
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device *emac_dev = &ndev->dev;
+
+	/* inform the upper layers. */
+	netif_stop_queue(ndev);
+	napi_disable(&priv->napi);
+
+	netif_carrier_off(ndev);
+	emac_int_disable(priv);
+	cpdma_ctlr_stop(priv->dma);
+	emac_write(EMAC_SOFTRESET, 1);
+
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
+
+	/* Free IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++)
+			free_irq(irq_num, priv->ndev);
+		i++;
+	}
+
+	if (netif_msg_drv(priv))
+		dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+
+	pm_runtime_put(&priv->pdev->dev);
+	return 0;
+}
+
+/**
+ * emac_dev_getnetstats - EMAC get statistics function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to get statistics from the device.
+ *
+ * Returns the statistics in the net_device_stats structure pulled from EMAC
+ */
+static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 mac_control;
+	u32 stats_clear_mask;
+	int err;
+
+	err = pm_runtime_get_sync(&priv->pdev->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(&priv->pdev->dev);
+		dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+			__func__, err);
+		return &ndev->stats;
+	}
+
+	/* update emac hardware stats and reset the registers */
+
+	mac_control = emac_read(EMAC_MACCONTROL);
+
+	if (mac_control & EMAC_MACCONTROL_GMIIEN)
+		stats_clear_mask = EMAC_STATS_CLR_MASK;
+	else
+		stats_clear_mask = 0;
+
+	ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+	emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
+
+	ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+					   emac_read(EMAC_TXSINGLECOLL) +
+					   emac_read(EMAC_TXMULTICOLL));
+	emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+	emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+	emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
+
+	ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+						emac_read(EMAC_RXJABBER) +
+						emac_read(EMAC_RXUNDERSIZED));
+	emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+	emac_write(EMAC_RXJABBER, stats_clear_mask);
+	emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
+
+	ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+					       emac_read(EMAC_RXMOFOVERRUNS));
+	emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+	emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
+
+	ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+	emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
+
+	ndev->stats.tx_carrier_errors +=
+		emac_read(EMAC_TXCARRIERSENSE);
+	emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
+
+	ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
+	emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+
+	pm_runtime_put(&priv->pdev->dev);
+
+	return &ndev->stats;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+	.ndo_open		= emac_dev_open,
+	.ndo_stop		= emac_dev_stop,
+	.ndo_start_xmit		= emac_dev_xmit,
+	.ndo_set_rx_mode	= emac_dev_mcast_set,
+	.ndo_set_mac_address	= emac_dev_setmac_addr,
+	.ndo_do_ioctl		= emac_devioctl,
+	.ndo_tx_timeout		= emac_dev_tx_timeout,
+	.ndo_get_stats		= emac_dev_getnetstats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= emac_poll_controller,
+#endif
+};
+
+static const struct of_device_id davinci_emac_of_match[];
+
+static struct emac_platform_data *
+davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
+{
+	struct device_node *np;
+	const struct of_device_id *match;
+	const struct emac_platform_data *auxdata;
+	struct emac_platform_data *pdata = NULL;
+	const u8 *mac_addr;
+
+	if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+		return dev_get_platdata(&pdev->dev);
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	np = pdev->dev.of_node;
+	pdata->version = EMAC_VERSION_2;
+
+	if (!is_valid_ether_addr(pdata->mac_addr)) {
+		mac_addr = of_get_mac_address(np);
+		if (mac_addr)
+			ether_addr_copy(pdata->mac_addr, mac_addr);
+	}
+
+	of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
+			     &pdata->ctrl_reg_offset);
+
+	of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
+			     &pdata->ctrl_mod_reg_offset);
+
+	of_property_read_u32(np, "ti,davinci-ctrl-ram-offset",
+			     &pdata->ctrl_ram_offset);
+
+	of_property_read_u32(np, "ti,davinci-ctrl-ram-size",
+			     &pdata->ctrl_ram_size);
+
+	of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en);
+
+	pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
+
+	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+	if (!priv->phy_node) {
+		if (!of_phy_is_fixed_link(np))
+			pdata->phy_id = NULL;
+		else if (of_phy_register_fixed_link(np) >= 0)
+			priv->phy_node = of_node_get(np);
+	}
+
+	auxdata = pdev->dev.platform_data;
+	if (auxdata) {
+		pdata->interrupt_enable = auxdata->interrupt_enable;
+		pdata->interrupt_disable = auxdata->interrupt_disable;
+	}
+
+	match = of_match_device(davinci_emac_of_match, &pdev->dev);
+	if (match && match->data) {
+		auxdata = match->data;
+		pdata->version = auxdata->version;
+		pdata->hw_ram_addr = auxdata->hw_ram_addr;
+	}
+
+	return pdata;
+}
+
+static int davinci_emac_try_get_mac(struct platform_device *pdev,
+				    int instance, u8 *mac_addr)
+{
+	if (!pdev->dev.of_node)
+		return -EINVAL;
+
+	return ti_cm_get_macid(&pdev->dev, instance, mac_addr);
+}
+
+/**
+ * davinci_emac_probe - EMAC device probe
+ * @pdev: The DaVinci EMAC device that we are probing
+ *
+ * Called when probing for emac devices. We get details of instances and
+ * resource information from platform init, register a network device,
+ * and allocate the resources necessary for the driver to operate
+ */
+static int davinci_emac_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int rc = 0;
+	struct resource *res, *res_ctrl;
+	struct net_device *ndev;
+	struct emac_priv *priv;
+	unsigned long hw_ram_addr;
+	struct emac_platform_data *pdata;
+	struct cpdma_params dma_params;
+	struct clk *emac_clk;
+	unsigned long emac_bus_frequency;
+
+	/* obtain emac clock from kernel */
+	emac_clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(emac_clk)) {
+		dev_err(&pdev->dev, "failed to get EMAC clock\n");
+		return -EBUSY;
+	}
+	emac_bus_frequency = clk_get_rate(emac_clk);
+	devm_clk_put(&pdev->dev, emac_clk);
+
+	/* TODO: Probe PHY here if possible */
+
+	ndev = alloc_etherdev(sizeof(struct emac_priv));
+	if (!ndev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	priv->pdev = pdev;
+	priv->ndev = ndev;
+	priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
+
+	spin_lock_init(&priv->lock);
+
+	pdata = davinci_emac_of_get_pdata(pdev, priv);
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data\n");
+		rc = -ENODEV;
+		goto err_free_netdev;
+	}
+
+	/* MAC addr, PHY mask and RMII enable info from platform_data */
+	memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
+	priv->phy_id = pdata->phy_id;
+	priv->rmii_en = pdata->rmii_en;
+	priv->version = pdata->version;
+	priv->int_enable = pdata->interrupt_enable;
+	priv->int_disable = pdata->interrupt_disable;
+
+	priv->coal_intvl = 0;
+	priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
+
+	/* Get and map the EMAC register space; compute the physical base
+	 * only after devm_ioremap_resource() has validated the resource,
+	 * so a missing resource cannot cause a NULL dereference.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->remap_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->remap_addr)) {
+		rc = PTR_ERR(priv->remap_addr);
+		goto no_pdata;
+	}
+	priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
+
+	res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res_ctrl) {
+		priv->ctrl_base =
+			devm_ioremap_resource(&pdev->dev, res_ctrl);
+		if (IS_ERR(priv->ctrl_base)) {
+			rc = PTR_ERR(priv->ctrl_base);
+			goto no_pdata;
+		}
+	} else {
+		priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+	}
+
+	priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
+	ndev->base_addr = (unsigned long)priv->remap_addr;
+
+	hw_ram_addr = pdata->hw_ram_addr;
+	if (!hw_ram_addr)
+		hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+	memset(&dma_params, 0, sizeof(dma_params));
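+	/* CPDMA register blocks sit at fixed offsets inside the EMAC window */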
+	dma_params.dev			= &pdev->dev;
+	dma_params.dmaregs		= priv->emac_base;
+	dma_params.rxthresh		= priv->emac_base + 0x120;
+	dma_params.rxfree		= priv->emac_base + 0x140;
+	dma_params.txhdp		= priv->emac_base + 0x600;
+	dma_params.rxhdp		= priv->emac_base + 0x620;
+	dma_params.txcp			= priv->emac_base + 0x640;
+	dma_params.rxcp			= priv->emac_base + 0x660;
+	dma_params.num_chan		= EMAC_MAX_TXRX_CHANNELS;
+	dma_params.min_packet_size	= EMAC_DEF_MIN_ETHPKTSIZE;
+	dma_params.desc_hw_addr		= hw_ram_addr;
+	dma_params.desc_mem_size	= pdata->ctrl_ram_size;
+	dma_params.desc_align		= 16;
+
+	dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+			(u32 __force)res->start + pdata->ctrl_ram_offset;
+
+	priv->dma = cpdma_ctlr_create(&dma_params);
+	if (!priv->dma) {
+		dev_err(&pdev->dev, "error initializing DMA\n");
+		rc = -ENOMEM;
+		goto no_pdata;
+	}
+
+	priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
+					 emac_tx_handler, 0);
+	if (IS_ERR(priv->txchan)) {
+		dev_err(&pdev->dev, "error initializing tx dma channel\n");
+		rc = PTR_ERR(priv->txchan);
+		goto err_free_dma;
+	}
+
+	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+					 emac_rx_handler, 1);
+	if (IS_ERR(priv->rxchan)) {
+		dev_err(&pdev->dev, "error initializing rx dma channel\n");
+		rc = PTR_ERR(priv->rxchan);
+		goto err_free_txchan;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "error getting irq res\n");
+		rc = -ENOENT;
+		goto err_free_rxchan;
+	}
+	ndev->irq = res->start;
+
+	rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
+	if (!rc)
+		ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+
+	if (!is_valid_ether_addr(priv->mac_addr)) {
+		/* Use random MAC if none passed */
+		eth_hw_addr_random(ndev);
+		memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+		dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+							priv->mac_addr);
+	}
+
+	ndev->netdev_ops = &emac_netdev_ops;
+	ndev->ethtool_ops = &ethtool_ops;
+	netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+
+	pm_runtime_enable(&pdev->dev);
+	rc = pm_runtime_get_sync(&pdev->dev);
+	if (rc < 0) {
+		pm_runtime_put_noidle(&pdev->dev);
+		dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+			__func__, rc);
+		goto err_napi_del;
+	}
+
+	/* register the network device */
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	rc = register_netdev(ndev);
+	if (rc) {
+		dev_err(&pdev->dev, "error in register_netdev\n");
+		rc = -ENODEV;
+		pm_runtime_put(&pdev->dev);
+		goto err_napi_del;
+	}
+
+	if (netif_msg_probe(priv)) {
+		dev_notice(&pdev->dev,
+			   "DaVinci EMAC Probe found device (regs: %pa, irq: %d)\n",
+			   &priv->emac_base_phys, ndev->irq);
+	}
+	pm_runtime_put(&pdev->dev);
+
+	return 0;
+
+err_napi_del:
+	netif_napi_del(&priv->napi);
+err_free_rxchan:
+	cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+	cpdma_chan_destroy(priv->txchan);
+err_free_dma:
+	cpdma_ctlr_destroy(priv->dma);
+no_pdata:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	of_node_put(priv->phy_node);
+err_free_netdev:
+	free_netdev(ndev);
+	return rc;
+}
+
+/**
+ * davinci_emac_remove - EMAC device remove
+ * @pdev: The DaVinci EMAC device that we are removing
+ *
+ * Called when removing the device driver. We disable clock usage, release
+ * the resources taken up by the driver, and unregister the network device
+ */
+static int davinci_emac_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device_node *np = pdev->dev.of_node;
+
+	dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
+
+	/* unregister first: the netdev may still be using the DMA channels */
+	unregister_netdev(ndev);
+
+	if (priv->txchan)
+		cpdma_chan_destroy(priv->txchan);
+	if (priv->rxchan)
+		cpdma_chan_destroy(priv->rxchan);
+	cpdma_ctlr_destroy(priv->dma);
+	of_node_put(priv->phy_node);
+	pm_runtime_disable(&pdev->dev);
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static int davinci_emac_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+
+	if (netif_running(ndev))
+		emac_dev_stop(ndev);
+
+	return 0;
+}
+
+static int davinci_emac_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+
+	if (netif_running(ndev))
+		emac_dev_open(ndev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops davinci_emac_pm_ops = {
+	.suspend	= davinci_emac_suspend,
+	.resume		= davinci_emac_resume,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct emac_platform_data am3517_emac_data = {
+	.version		= EMAC_VERSION_2,
+	.hw_ram_addr		= 0x01e20000,
+};
+
+static const struct emac_platform_data dm816_emac_data = {
+	.version		= EMAC_VERSION_2,
+};
+
+static const struct of_device_id davinci_emac_of_match[] = {
+	{.compatible = "ti,davinci-dm6467-emac", },
+	{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+	{.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
+#endif
+
+/* davinci_emac_driver: EMAC platform driver structure */
+static struct platform_driver davinci_emac_driver = {
+	.driver = {
+		.name	 = "davinci_emac",
+		.pm	 = &davinci_emac_pm_ops,
+		.of_match_table = of_match_ptr(davinci_emac_of_match),
+	},
+	.probe = davinci_emac_probe,
+	.remove = davinci_emac_remove,
+};
+
+/**
+ * davinci_emac_init - EMAC driver module init
+ *
+ * Called when initializing the driver. We register the driver with
+ * the platform.
+ */
+static int __init davinci_emac_init(void)
+{
+	return platform_driver_register(&davinci_emac_driver);
+}
+late_initcall(davinci_emac_init);
+
+/**
+ * davinci_emac_exit - EMAC driver module exit
+ *
+ * Called when exiting the driver completely. We unregister the driver with
+ * the platform and exit
+ */
+static void __exit davinci_emac_exit(void)
+{
+	platform_driver_unregister(&davinci_emac_driver);
+}
+module_exit(davinci_emac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>");
+MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
new file mode 100644
index 0000000..a98aeda
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -0,0 +1,553 @@
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/pinctrl/consumer.h>
+
+/*
+ * This timeout definition is a worst-case, ultra-defensive measure against
+ * unexpected controller lockups.  Ideally, we should never hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT		100 /* msecs */
+
+#define PHY_REG_MASK		0x1f
+#define PHY_ID_MASK		0x1f
+
+#define DEF_OUT_FREQ		2200000		/* 2.2 MHz */
+
+struct davinci_mdio_of_param {
+	int autosuspend_delay_ms;
+};
+
+struct davinci_mdio_regs {
+	u32	version;
+	u32	control;
+#define CONTROL_IDLE		BIT(31)
+#define CONTROL_ENABLE		BIT(30)
+#define CONTROL_MAX_DIV		(0xffff)
+
+	u32	alive;
+	u32	link;
+	u32	linkintraw;
+	u32	linkintmasked;
+	u32	__reserved_0[2];
+	u32	userintraw;
+	u32	userintmasked;
+	u32	userintmaskset;
+	u32	userintmaskclr;
+	u32	__reserved_1[20];
+
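+	/* user access register pairs follow the fixed registers;
+	 * user[0] is a zero-length array indexed at runtime
+	 */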
+	struct {
+		u32	access;
+#define USERACCESS_GO		BIT(31)
+#define USERACCESS_WRITE	BIT(30)
+#define USERACCESS_ACK		BIT(29)
+#define USERACCESS_READ		(0)
+#define USERACCESS_DATA		(0xffff)
+
+		u32	physel;
+	}	user[0];
+};
+
+static const struct mdio_platform_data default_pdata = {
+	.bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+	struct mdio_platform_data pdata;
+	struct davinci_mdio_regs __iomem *regs;
+	struct clk	*clk;
+	struct device	*dev;
+	struct mii_bus	*bus;
+	bool            active_in_suspend;
+	unsigned long	access_time; /* jiffies */
+	/* Indicates that the driver shouldn't modify phy_mask if the
+	 * MDIO bus is registered from DT.
+	 */
+	bool		skip_scan;
+	u32		clk_div;
+};
+
+static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
+{
+	u32 mdio_in, div, mdio_out_khz, access_time;
+
+	mdio_in = clk_get_rate(data->clk);
+	div = (mdio_in / data->pdata.bus_freq) - 1;
+	if (div > CONTROL_MAX_DIV)
+		div = CONTROL_MAX_DIV;
+
+	data->clk_div = div;
+	/*
+	 * One mdio transaction consists of:
+	 *	32 bits of preamble
+	 *	32 bits of transferred data
+	 *	24 bits of bus yield (not needed unless shared?)
+	 */
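+	/* 88 bit times (32 + 32 + 24) per transaction, converted to usecs */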
+	mdio_out_khz = mdio_in / (1000 * (div + 1));
+	access_time  = (88 * 1000) / mdio_out_khz;
+
+	/*
+	 * In the worst case, we could be kicking off a user-access immediately
+	 * after the mdio bus scan state-machine triggered its own read.  If
+	 * so, our request could get deferred by one access cycle.  We
+	 * defensively allow for 4 access cycles.
+	 */
+	data->access_time = usecs_to_jiffies(access_time * 4);
+	if (!data->access_time)
+		data->access_time = 1;
+}
+
+static void davinci_mdio_enable(struct davinci_mdio_data *data)
+{
+	/* set enable and clock divider */
+	__raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+}
+
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+	struct davinci_mdio_data *data = bus->priv;
+	u32 phy_mask, ver;
+	int ret;
+
+	ret = pm_runtime_get_sync(data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(data->dev);
+		return ret;
+	}
+
+	/* wait for scan logic to settle */
+	msleep(PHY_MAX_ADDR * data->access_time);
+
+	/* dump hardware version info */
+	ver = __raw_readl(&data->regs->version);
+	dev_info(data->dev,
+		 "davinci mdio revision %d.%d, bus freq %ld\n",
+		 (ver >> 8) & 0xff, ver & 0xff,
+		 data->pdata.bus_freq);
+
+	if (data->skip_scan)
+		goto done;
+
+	/* get phy mask from the alive register */
+	phy_mask = __raw_readl(&data->regs->alive);
+	if (phy_mask) {
+		/* restrict mdio bus to live phys only */
+		dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+		phy_mask = ~phy_mask;
+	} else {
+		/* desperately scan all phys */
+		dev_warn(data->dev, "no live phy, scanning all\n");
+		phy_mask = 0;
+	}
+	data->bus->phy_mask = phy_mask;
+
+done:
+	pm_runtime_mark_last_busy(data->dev);
+	pm_runtime_put_autosuspend(data->dev);
+
+	return 0;
+}
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+	struct davinci_mdio_regs __iomem *regs = data->regs;
+	unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+	u32 reg;
+
+	while (time_after(timeout, jiffies)) {
+		reg = __raw_readl(&regs->user[0].access);
+		if ((reg & USERACCESS_GO) == 0)
+			return 0;
+
+		reg = __raw_readl(&regs->control);
+		if ((reg & CONTROL_IDLE) == 0) {
+			usleep_range(100, 200);
+			continue;
+		}
+
+		/*
+		 * An emac soft_reset may have clobbered the mdio controller's
+		 * state machine.  We need to reset and retry the current
+		 * operation
+		 */
+		dev_warn(data->dev, "resetting idled controller\n");
+		davinci_mdio_enable(data);
+		return -EAGAIN;
+	}
+
+	reg = __raw_readl(&regs->user[0].access);
+	if ((reg & USERACCESS_GO) == 0)
+		return 0;
+
+	dev_err(data->dev, "timed out waiting for user access\n");
+	return -ETIMEDOUT;
+}
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+	struct davinci_mdio_regs __iomem *regs = data->regs;
+	u32 val;
+	int ret;
+
+	ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
+				 0, MDIO_TIMEOUT * 1000);
+	if (ret)
+		dev_err(data->dev, "timed out waiting for idle\n");
+
+	return ret;
+}
+
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+	struct davinci_mdio_data *data = bus->priv;
+	u32 reg;
+	int ret;
+
+	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(data->dev);
+		return ret;
+	}
+
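+	/* USERACCESS layout: GO | READ, phy_reg in bits 25:21, phy_id in 20:16 */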
+	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+	       (phy_id << 16));
+
+	while (1) {
+		ret = wait_for_user_access(data);
+		if (ret == -EAGAIN)
+			continue;
+		if (ret < 0)
+			break;
+
+		__raw_writel(reg, &data->regs->user[0].access);
+
+		ret = wait_for_user_access(data);
+		if (ret == -EAGAIN)
+			continue;
+		if (ret < 0)
+			break;
+
+		reg = __raw_readl(&data->regs->user[0].access);
+		ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+		break;
+	}
+
+	pm_runtime_mark_last_busy(data->dev);
+	pm_runtime_put_autosuspend(data->dev);
+	return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+			      int phy_reg, u16 phy_data)
+{
+	struct davinci_mdio_data *data = bus->priv;
+	u32 reg;
+	int ret;
+
+	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(data->dev);
+		return ret;
+	}
+
+	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+		   (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+	while (1) {
+		ret = wait_for_user_access(data);
+		if (ret == -EAGAIN)
+			continue;
+		if (ret < 0)
+			break;
+
+		__raw_writel(reg, &data->regs->user[0].access);
+
+		ret = wait_for_user_access(data);
+		if (ret == -EAGAIN)
+			continue;
+		break;
+	}
+
+	pm_runtime_mark_last_busy(data->dev);
+	pm_runtime_put_autosuspend(data->dev);
+
+	return ret;
+}
+
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+			 struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	u32 prop;
+
+	if (!node)
+		return -EINVAL;
+
+	if (of_property_read_u32(node, "bus_freq", &prop)) {
+		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
+		return -EINVAL;
+	}
+	data->bus_freq = prop;
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
+	.autosuspend_delay_ms = 100,
+};
+
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+	{ .compatible = "ti,davinci_mdio", },
+	{ .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
+
+static int davinci_mdio_probe(struct platform_device *pdev)
+{
+	struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device *dev = &pdev->dev;
+	struct davinci_mdio_data *data;
+	struct resource *res;
+	struct phy_device *phy;
+	int ret, addr;
+	int autosuspend_delay_ms = -1;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->bus = devm_mdiobus_alloc(dev);
+	if (!data->bus) {
+		dev_err(dev, "failed to alloc mii bus\n");
+		return -ENOMEM;
+	}
+
+	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+		const struct of_device_id	*of_id;
+
+		ret = davinci_mdio_probe_dt(&data->pdata, pdev);
+		if (ret)
+			return ret;
+		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+		of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
+		if (of_id) {
+			const struct davinci_mdio_of_param *of_mdio_data;
+
+			of_mdio_data = of_id->data;
+			if (of_mdio_data)
+				autosuspend_delay_ms =
+					of_mdio_data->autosuspend_delay_ms;
+		}
+	} else {
+		data->pdata = pdata ? (*pdata) : default_pdata;
+		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+			 pdev->name, pdev->id);
+	}
+
+	data->bus->name		= dev_name(dev);
+	data->bus->read		= davinci_mdio_read;
+	data->bus->write	= davinci_mdio_write;
+	data->bus->reset	= davinci_mdio_reset;
+	data->bus->parent	= dev;
+	data->bus->priv		= data;
+
+	data->clk = devm_clk_get(dev, "fck");
+	if (IS_ERR(data->clk)) {
+		dev_err(dev, "failed to get device clock\n");
+		return PTR_ERR(data->clk);
+	}
+
+	dev_set_drvdata(dev, data);
+	data->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->regs))
+		return PTR_ERR(data->regs);
+
+	davinci_mdio_init_clk(data);
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	/* Register the MII bus.
+	 * Create PHYs from DT only if PHY child nodes are explicitly
+	 * defined, to stay backward compatible with DTs that assume the
+	 * Davinci MDIO will always scan the bus for PHYs.
+	 */
+	if (dev->of_node && of_get_child_count(dev->of_node))
+		data->skip_scan = true;
+
+	ret = of_mdiobus_register(data->bus, dev->of_node);
+	if (ret)
+		goto bail_out;
+
+	/* scan and dump the bus */
+	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+		phy = mdiobus_get_phy(data->bus, addr);
+		if (phy) {
+			dev_info(dev, "phy[%d]: device %s, driver %s\n",
+				 phy->mdio.addr, phydev_name(phy),
+				 phy->drv ? phy->drv->name : "unknown");
+		}
+	}
+
+	return 0;
+
+bail_out:
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return ret;
+}
+
+static int davinci_mdio_remove(struct platform_device *pdev)
+{
+	struct davinci_mdio_data *data = platform_get_drvdata(pdev);
+
+	if (data->bus)
+		mdiobus_unregister(data->bus);
+
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int davinci_mdio_runtime_suspend(struct device *dev)
+{
+	struct davinci_mdio_data *data = dev_get_drvdata(dev);
+	u32 ctrl;
+
+	/* shutdown the scan state machine */
+	ctrl = __raw_readl(&data->regs->control);
+	ctrl &= ~CONTROL_ENABLE;
+	__raw_writel(ctrl, &data->regs->control);
+	wait_for_idle(data);
+
+	return 0;
+}
+
+static int davinci_mdio_runtime_resume(struct device *dev)
+{
+	struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+	davinci_mdio_enable(data);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mdio_suspend(struct device *dev)
+{
+	struct davinci_mdio_data *data = dev_get_drvdata(dev);
+	int ret = 0;
+
+	data->active_in_suspend = !pm_runtime_status_suspended(dev);
+	if (data->active_in_suspend)
+		ret = pm_runtime_force_suspend(dev);
+	if (ret < 0)
+		return ret;
+
+	/* Select sleep pin state */
+	pinctrl_pm_select_sleep_state(dev);
+
+	return 0;
+}
+
+static int davinci_mdio_resume(struct device *dev)
+{
+	struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+	/* Select default pin state */
+	pinctrl_pm_select_default_state(dev);
+
+	if (data->active_in_suspend)
+		pm_runtime_force_resume(dev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+	SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
+			   davinci_mdio_runtime_resume, NULL)
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
+};
+
+static struct platform_driver davinci_mdio_driver = {
+	.driver = {
+		.name	 = "davinci_mdio",
+		.pm	 = &davinci_mdio_pm_ops,
+		.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
+	},
+	.probe = davinci_mdio_probe,
+	.remove = davinci_mdio_remove,
+};
+
+static int __init davinci_mdio_init(void)
+{
+	return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+	platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644
index 0000000..c4ffdf4
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -0,0 +1,256 @@
+/*
+ * NetCP driver local header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:	Sandeep Nair <sandeep_n@ti.com>
+ *		Sandeep Paulraj <s-paulraj@ti.com>
+ *		Cyril Chemparathy <cyril@ti.com>
+ *		Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *		Wingman Kwok <w-kwok2@ti.com>
+ *		Murali Karicheri <m-karicheri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __NETCP_H__
+#define __NETCP_H__
+
+#include <linux/netdevice.h>
+#include <linux/soc/ti/knav_dma.h>
+#include <linux/u64_stats_sync.h>
+
+/* Maximum Ethernet frame size supported by Keystone switch */
+#define NETCP_MAX_FRAME_SIZE		9504
+
+#define SGMII_LINK_MAC_MAC_AUTONEG	0
+#define SGMII_LINK_MAC_PHY		1
+#define SGMII_LINK_MAC_MAC_FORCED	2
+#define SGMII_LINK_MAC_FIBER		3
+#define SGMII_LINK_MAC_PHY_NO_MDIO	4
+#define RGMII_LINK_MAC_PHY		5
+#define RGMII_LINK_MAC_PHY_NO_MDIO	7
+#define XGMII_LINK_MAC_PHY		10
+#define XGMII_LINK_MAC_MAC_FORCED	11
+
+struct netcp_device;
+
+struct netcp_tx_pipe {
+	struct netcp_device	*netcp_device;
+	void			*dma_queue;
+	unsigned int		dma_queue_id;
+	/* To port for packet forwarded to switch. Used only by ethss */
+	u8			switch_to_port;
+#define	SWITCH_TO_PORT_IN_TAGINFO	BIT(0)
+	u8			flags;
+	void			*dma_channel;
+	const char		*dma_chan_name;
+};
+
+#define ADDR_NEW			BIT(0)
+#define ADDR_VALID			BIT(1)
+
+enum netcp_addr_type {
+	ADDR_ANY,
+	ADDR_DEV,
+	ADDR_UCAST,
+	ADDR_MCAST,
+	ADDR_BCAST
+};
+
+struct netcp_addr {
+	struct netcp_intf	*netcp;
+	unsigned char		addr[ETH_ALEN];
+	enum netcp_addr_type	type;
+	unsigned int		flags;
+	struct list_head	node;
+};
+
+struct netcp_stats {
+	struct u64_stats_sync   syncp_rx ____cacheline_aligned_in_smp;
+	u64                     rx_packets;
+	u64                     rx_bytes;
+	u32                     rx_errors;
+	u32                     rx_dropped;
+
+	struct u64_stats_sync   syncp_tx ____cacheline_aligned_in_smp;
+	u64                     tx_packets;
+	u64                     tx_bytes;
+	u32                     tx_errors;
+	u32                     tx_dropped;
+};
+
+struct netcp_intf {
+	struct device		*dev;
+	struct device		*ndev_dev;
+	struct net_device	*ndev;
+	bool			big_endian;
+	unsigned int		tx_compl_qid;
+	void			*tx_pool;
+	struct list_head	txhook_list_head;
+	unsigned int		tx_pause_threshold;
+	void			*tx_compl_q;
+
+	unsigned int		tx_resume_threshold;
+	void			*rx_queue;
+	void			*rx_pool;
+	struct list_head	rxhook_list_head;
+	unsigned int		rx_queue_id;
+	void			*rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
+	struct napi_struct	rx_napi;
+	struct napi_struct	tx_napi;
+#define ETH_SW_CAN_REMOVE_ETH_FCS	BIT(0)
+	u32			hw_cap;
+
+	/* 64-bit netcp stats */
+	struct netcp_stats	stats;
+
+	void			*rx_channel;
+	const char		*dma_chan_name;
+	u32			rx_pool_size;
+	u32			rx_pool_region_id;
+	u32			tx_pool_size;
+	u32			tx_pool_region_id;
+	struct list_head	module_head;
+	struct list_head	interface_list;
+	struct list_head	addr_list;
+	bool			netdev_registered;
+	bool			primary_module_attached;
+
+	/* Lock used for protecting Rx/Tx hook list management */
+	spinlock_t		lock;
+	struct netcp_device	*netcp_device;
+	struct device_node	*node_interface;
+
+	/* DMA configuration data */
+	u32			msg_enable;
+	u32			rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
+};
+
+#define	NETCP_PSDATA_LEN		KNAV_DMA_NUM_PS_WORDS
+struct netcp_packet {
+	struct sk_buff		*skb;
+	__le32			*epib;
+	u32			*psdata;
+	u32			eflags;
+	unsigned int		psdata_len;
+	struct netcp_intf	*netcp;
+	struct netcp_tx_pipe	*tx_pipe;
+	bool			rxtstamp_complete;
+	void			*ts_context;
+
+	void (*txtstamp)(void *ctx, struct sk_buff *skb);
+};
+
+static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
+				     unsigned int bytes)
+{
+	u32 *buf;
+	unsigned int words;
+
+	if ((bytes & 0x03) != 0)
+		return NULL;
+	words = bytes >> 2;
+
+	if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
+		return NULL;
+
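+	/* psdata is consumed from the tail of the buffer toward the front */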
+	p_info->psdata_len += words;
+	buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
+	return buf;
+}
+
+static inline int netcp_align_psdata(struct netcp_packet *p_info,
+				     unsigned int byte_align)
+{
+	int padding;
+
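+	/* psdata_len is in 32-bit words; << 2 converts to bytes for the modulo */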
+	switch (byte_align) {
+	case 0:
+		padding = -EINVAL;
+		break;
+	case 1:
+	case 2:
+	case 4:
+		padding = 0;
+		break;
+	case 8:
+		padding = (p_info->psdata_len << 2) % 8;
+		break;
+	case 16:
+		padding = (p_info->psdata_len << 2) % 16;
+		break;
+	default:
+		padding = (p_info->psdata_len << 2) % byte_align;
+		break;
+	}
+	return padding;
+}
+
+struct netcp_module {
+	const char		*name;
+	struct module		*owner;
+	bool			primary;
+
+	/* probe/remove: called once per NETCP instance */
+	int	(*probe)(struct netcp_device *netcp_device,
+			 struct device *device, struct device_node *node,
+			 void **inst_priv);
+	int	(*remove)(struct netcp_device *netcp_device, void *inst_priv);
+
+	/* attach/release: called once per network interface */
+	int	(*attach)(void *inst_priv, struct net_device *ndev,
+			  struct device_node *node, void **intf_priv);
+	int	(*release)(void *intf_priv);
+	int	(*open)(void *intf_priv, struct net_device *ndev);
+	int	(*close)(void *intf_priv, struct net_device *ndev);
+	int	(*add_addr)(void *intf_priv, struct netcp_addr *naddr);
+	int	(*del_addr)(void *intf_priv, struct netcp_addr *naddr);
+	int	(*add_vid)(void *intf_priv, int vid);
+	int	(*del_vid)(void *intf_priv, int vid);
+	int	(*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
+	int	(*set_rx_mode)(void *intf_priv, bool promisc);
+
+	/* used internally */
+	struct list_head	module_list;
+	struct list_head	interface_list;
+};
+
+int netcp_register_module(struct netcp_module *module);
+void netcp_unregister_module(struct netcp_module *module);
+void *netcp_module_get_intf_data(struct netcp_module *module,
+				 struct netcp_intf *intf);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+		      struct netcp_device *netcp_device,
+		      const char *dma_chan_name, unsigned int dma_queue_id);
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
+
+typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+			  netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+			    netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+			  netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+			    netcp_hook_rtn *hook_rtn, void *hook_data);
+void *netcp_device_find_module(struct netcp_device *netcp_device,
+			       const char *name);
+
+/* SGMII functions */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
+
+/* XGBE SERDES init functions */
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
+
+#endif	/* __NETCP_H__ */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644
index 0000000..a1d335a
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -0,0 +1,2298 @@
+/*
+ * Keystone NetCP Core driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:	Sandeep Nair <sandeep_n@ti.com>
+ *		Sandeep Paulraj <s-paulraj@ti.com>
+ *		Cyril Chemparathy <cyril@ti.com>
+ *		Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *		Murali Karicheri <m-karicheri2@ti.com>
+ *		Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_qmss.h>
+#include <linux/soc/ti/knav_dma.h>
+
+#include "netcp.h"
+
+#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
+#define NETCP_NAPI_WEIGHT	64
+#define NETCP_TX_TIMEOUT	(5 * HZ)
+#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
+#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
+#define NETCP_MAX_MCAST_ADDR	16
+
+#define NETCP_EFUSE_REG_INDEX	0
+
+#define NETCP_MOD_PROBE_SKIPPED	1
+#define NETCP_MOD_PROBE_FAILED	2
+
+#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
+		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
+		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
+		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
+		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
+		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
+		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
+		    NETIF_MSG_RX_STATUS)
+
+#define NETCP_EFUSE_ADDR_SWAP	2
+
+#define knav_queue_get_id(q)	knav_queue_device_control(q, \
+				KNAV_QUEUE_GET_ID, (unsigned long)NULL)
+
+#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
+					KNAV_QUEUE_ENABLE_NOTIFY,	\
+					(unsigned long)NULL)
+
+#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
+					KNAV_QUEUE_DISABLE_NOTIFY,	\
+					(unsigned long)NULL)
+
+#define knav_queue_get_count(q)	knav_queue_device_control(q, \
+				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
+
+#define for_each_netcp_module(module)			\
+	list_for_each_entry(module, &netcp_modules, module_list)
+
+#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
+	list_for_each_entry(inst_modpriv, \
+		&((netcp_device)->modpriv_head), inst_list)
+
+#define for_each_module(netcp, intf_modpriv)			\
+	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
+
+/* Module management structures */
+struct netcp_device {
+	struct list_head	device_list;
+	struct list_head	interface_head;
+	struct list_head	modpriv_head;
+	struct device		*device;
+};
+
+struct netcp_inst_modpriv {
+	struct netcp_device	*netcp_device;
+	struct netcp_module	*netcp_module;
+	struct list_head	inst_list;
+	void			*module_priv;
+};
+
+struct netcp_intf_modpriv {
+	struct netcp_intf	*netcp_priv;
+	struct netcp_module	*netcp_module;
+	struct list_head	intf_list;
+	void			*module_priv;
+};
+
+struct netcp_tx_cb {
+	void	*ts_context;
+	void	(*txtstamp)(void *context, struct sk_buff *skb);
+};
+
+static LIST_HEAD(netcp_devices);
+static LIST_HEAD(netcp_modules);
+static DEFINE_MUTEX(netcp_modules_lock);
+
+static int netcp_debug_level = -1;
+module_param(netcp_debug_level, int, 0);
+MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
+
+/* Helper functions - Get/Set */
+static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
+			 struct knav_dma_desc *desc)
+{
+	*buff_len = le32_to_cpu(desc->buff_len);
+	*buff = le32_to_cpu(desc->buff);
+	*ndesc = le32_to_cpu(desc->next_desc);
+}
+
+static void get_desc_info(u32 *desc_info, u32 *pkt_info,
+			  struct knav_dma_desc *desc)
+{
+	*desc_info = le32_to_cpu(desc->desc_info);
+	*pkt_info = le32_to_cpu(desc->packet_info);
+}
+
+static u32 get_sw_data(int index, struct knav_dma_desc *desc)
+{
+	/* No Endian conversion needed as this data is untouched by hw */
+	return desc->sw_data[index];
+}
+
+/* use these macros to get sw data */
+#define GET_SW_DATA0(desc) get_sw_data(0, desc)
+#define GET_SW_DATA1(desc) get_sw_data(1, desc)
+#define GET_SW_DATA2(desc) get_sw_data(2, desc)
+#define GET_SW_DATA3(desc) get_sw_data(3, desc)
+
+static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
+			     struct knav_dma_desc *desc)
+{
+	*buff = le32_to_cpu(desc->orig_buff);
+	*buff_len = le32_to_cpu(desc->orig_len);
+}
+
+static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
+{
+	int i;
+
+	for (i = 0; i < num_words; i++)
+		words[i] = le32_to_cpu(desc[i]);
+}
+
+static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
+			 struct knav_dma_desc *desc)
+{
+	desc->buff_len = cpu_to_le32(buff_len);
+	desc->buff = cpu_to_le32(buff);
+	desc->next_desc = cpu_to_le32(ndesc);
+}
+
+static void set_desc_info(u32 desc_info, u32 pkt_info,
+			  struct knav_dma_desc *desc)
+{
+	desc->desc_info = cpu_to_le32(desc_info);
+	desc->packet_info = cpu_to_le32(pkt_info);
+}
+
+static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
+{
+	/* No Endian conversion needed as this data is untouched by hw */
+	desc->sw_data[index] = data;
+}
+
+/* use these macros to set sw data */
+#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
+#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
+#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
+#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
+
+static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
+			     struct knav_dma_desc *desc)
+{
+	desc->orig_buff = cpu_to_le32(buff);
+	desc->orig_len = cpu_to_le32(buff_len);
+}
+
+static void set_words(u32 *words, int num_words, __le32 *desc)
+{
+	int i;
+
+	for (i = 0; i < num_words; i++)
+		desc[i] = cpu_to_le32(words[i]);
+}
+
+/* Read the e-fuse value as 32 bit values to be endian independent */
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
+{
+	unsigned int addr0, addr1;
+
+	addr1 = readl(efuse_mac + 4);
+	addr0 = readl(efuse_mac);
+
+	switch (swap) {
+	case NETCP_EFUSE_ADDR_SWAP:
+		addr0 = addr1;
+		addr1 = readl(efuse_mac);
+		break;
+	default:
+		break;
+	}
+
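+	/* addr1 holds MAC bytes 0-1; addr0 holds bytes 2-5 */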
+	x[0] = (addr1 & 0x0000ff00) >> 8;
+	x[1] = addr1 & 0x000000ff;
+	x[2] = (addr0 & 0xff000000) >> 24;
+	x[3] = (addr0 & 0x00ff0000) >> 16;
+	x[4] = (addr0 & 0x0000ff00) >> 8;
+	x[5] = addr0 & 0x000000ff;
+
+	return 0;
+}
+
+static const char *netcp_node_name(struct device_node *node)
+{
+	const char *name;
+
+	if (of_property_read_string(node, "label", &name) < 0)
+		name = node->name;
+	if (!name)
+		name = "unknown";
+	return name;
+}
+
+/* Module management routines */
+static int netcp_register_interface(struct netcp_intf *netcp)
+{
+	int ret;
+
+	ret = register_netdev(netcp->ndev);
+	if (!ret)
+		netcp->netdev_registered = true;
+	return ret;
+}
+
+static int netcp_module_probe(struct netcp_device *netcp_device,
+			      struct netcp_module *module)
+{
+	struct device *dev = netcp_device->device;
+	struct device_node *devices, *interface, *node = dev->of_node;
+	struct device_node *child;
+	struct netcp_inst_modpriv *inst_modpriv;
+	struct netcp_intf *netcp_intf;
+	struct netcp_module *tmp;
+	bool primary_module_registered = false;
+	int ret;
+
+	/* Find this module in the sub-tree for this device */
+	devices = of_get_child_by_name(node, "netcp-devices");
+	if (!devices) {
+		dev_err(dev, "could not find netcp-devices node\n");
+		return NETCP_MOD_PROBE_SKIPPED;
+	}
+
+	for_each_available_child_of_node(devices, child) {
+		const char *name = netcp_node_name(child);
+
+		if (!strcasecmp(module->name, name))
+			break;
+	}
+
+	of_node_put(devices);
+	/* If module not used for this device, skip it */
+	if (!child) {
+		dev_warn(dev, "module(%s) not used for device\n", module->name);
+		return NETCP_MOD_PROBE_SKIPPED;
+	}
+
+	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
+	if (!inst_modpriv) {
+		of_node_put(child);
+		return -ENOMEM;
+	}
+
+	inst_modpriv->netcp_device = netcp_device;
+	inst_modpriv->netcp_module = module;
+	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
+
+	ret = module->probe(netcp_device, dev, child,
+			    &inst_modpriv->module_priv);
+	of_node_put(child);
+	if (ret) {
+		dev_err(dev, "Probe of module(%s) failed with %d\n",
+			module->name, ret);
+		list_del(&inst_modpriv->inst_list);
+		devm_kfree(dev, inst_modpriv);
+		return NETCP_MOD_PROBE_FAILED;
+	}
+
+	/* Attach modules only if the primary module is probed */
+	for_each_netcp_module(tmp) {
+		if (tmp->primary)
+			primary_module_registered = true;
+	}
+
+	if (!primary_module_registered)
+		return 0;
+
+	/* Attach module to interfaces */
+	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
+			    interface_list) {
+		struct netcp_intf_modpriv *intf_modpriv;
+
+		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
+					    GFP_KERNEL);
+		if (!intf_modpriv)
+			return -ENOMEM;
+
+		interface = of_parse_phandle(netcp_intf->node_interface,
+					     module->name, 0);
+
+		if (!interface) {
+			devm_kfree(dev, intf_modpriv);
+			continue;
+		}
+
+		intf_modpriv->netcp_priv = netcp_intf;
+		intf_modpriv->netcp_module = module;
+		list_add_tail(&intf_modpriv->intf_list,
+			      &netcp_intf->module_head);
+
+		ret = module->attach(inst_modpriv->module_priv,
+				     netcp_intf->ndev, interface,
+				     &intf_modpriv->module_priv);
+		of_node_put(interface);
+		if (ret) {
+			dev_dbg(dev, "Attach of module %s declined with %d\n",
+				module->name, ret);
+			list_del(&intf_modpriv->intf_list);
+			devm_kfree(dev, intf_modpriv);
+			continue;
+		}
+	}
+
+	/* Now register the interface with netdev */
+	list_for_each_entry(netcp_intf,
+			    &netcp_device->interface_head,
+			    interface_list) {
+		/* If interface not registered then register now */
+		if (!netcp_intf->netdev_registered) {
+			ret = netcp_register_interface(netcp_intf);
+			if (ret)
+				return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+int netcp_register_module(struct netcp_module *module)
+{
+	struct netcp_device *netcp_device;
+	struct netcp_module *tmp;
+	int ret;
+
+	if (!module->name) {
+		WARN(1, "error registering netcp module: no name\n");
+		return -EINVAL;
+	}
+
+	if (!module->probe) {
+		WARN(1, "error registering netcp module: no probe\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&netcp_modules_lock);
+
+	for_each_netcp_module(tmp) {
+		if (!strcasecmp(tmp->name, module->name)) {
+			mutex_unlock(&netcp_modules_lock);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&module->module_list, &netcp_modules);
+
+	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+		ret = netcp_module_probe(netcp_device, module);
+		if (ret < 0)
+			goto fail;
+	}
+	mutex_unlock(&netcp_modules_lock);
+	return 0;
+
+fail:
+	mutex_unlock(&netcp_modules_lock);
+	netcp_unregister_module(module);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_register_module);
+
+static void netcp_release_module(struct netcp_device *netcp_device,
+				 struct netcp_module *module)
+{
+	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
+	struct netcp_intf *netcp_intf, *netcp_tmp;
+	struct device *dev = netcp_device->device;
+
+	/* Release the module from each interface */
+	list_for_each_entry_safe(netcp_intf, netcp_tmp,
+				 &netcp_device->interface_head,
+				 interface_list) {
+		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
+
+		list_for_each_entry_safe(intf_modpriv, intf_tmp,
+					 &netcp_intf->module_head,
+					 intf_list) {
+			if (intf_modpriv->netcp_module == module) {
+				module->release(intf_modpriv->module_priv);
+				list_del(&intf_modpriv->intf_list);
+				devm_kfree(dev, intf_modpriv);
+				break;
+			}
+		}
+	}
+
+	/* Remove the module from each instance */
+	list_for_each_entry_safe(inst_modpriv, inst_tmp,
+				 &netcp_device->modpriv_head, inst_list) {
+		if (inst_modpriv->netcp_module == module) {
+			module->remove(netcp_device,
+				       inst_modpriv->module_priv);
+			list_del(&inst_modpriv->inst_list);
+			devm_kfree(dev, inst_modpriv);
+			break;
+		}
+	}
+}
+
+void netcp_unregister_module(struct netcp_module *module)
+{
+	struct netcp_device *netcp_device;
+	struct netcp_module *module_tmp;
+
+	mutex_lock(&netcp_modules_lock);
+
+	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+		netcp_release_module(netcp_device, module);
+	}
+
+	/* Remove the module from the module list */
+	for_each_netcp_module(module_tmp) {
+		if (module == module_tmp) {
+			list_del(&module->module_list);
+			break;
+		}
+	}
+
+	mutex_unlock(&netcp_modules_lock);
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_module);
+
+void *netcp_module_get_intf_data(struct netcp_module *module,
+				 struct netcp_intf *intf)
+{
+	struct netcp_intf_modpriv *intf_modpriv;
+
+	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
+		if (intf_modpriv->netcp_module == module)
+			return intf_modpriv->module_priv;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
+
+/* Module TX and RX Hook management */
+struct netcp_hook_list {
+	struct list_head	 list;
+	netcp_hook_rtn		*hook_rtn;
+	void			*hook_data;
+	int			 order;
+};
+
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+			  netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+	struct netcp_hook_list *entry;
+	struct netcp_hook_list *next;
+	unsigned long flags;
+
+	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->hook_rtn  = hook_rtn;
+	entry->hook_data = hook_data;
+	entry->order     = order;
+
+	spin_lock_irqsave(&netcp_priv->lock, flags);
+	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
+		if (next->order > order)
+			break;
+	}
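+	/* Insert before the first hook with a larger order; when the loop
+	 * runs off the end, &next->list is the list head itself, so the
+	 * entry lands at the tail. The hook list stays sorted by order.
+	 */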
+	__list_add(&entry->list, next->list.prev, &next->list);
+	spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_register_txhook);
+
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+			    netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+	struct netcp_hook_list *next, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&netcp_priv->lock, flags);
+	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
+		if ((next->order     == order) &&
+		    (next->hook_rtn  == hook_rtn) &&
+		    (next->hook_data == hook_data)) {
+			list_del(&next->list);
+			spin_unlock_irqrestore(&netcp_priv->lock, flags);
+			devm_kfree(netcp_priv->dev, next);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&netcp_priv->lock, flags);
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
+
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+			  netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+	struct netcp_hook_list *entry;
+	struct netcp_hook_list *next;
+	unsigned long flags;
+
+	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->hook_rtn  = hook_rtn;
+	entry->hook_data = hook_data;
+	entry->order     = order;
+
+	spin_lock_irqsave(&netcp_priv->lock, flags);
+	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
+		if (next->order > order)
+			break;
+	}
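+	/* Sorted insert, same scheme as netcp_register_txhook() above */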
+	__list_add(&entry->list, next->list.prev, &next->list);
+	spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_register_rxhook);
+
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+			    netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+	struct netcp_hook_list *next, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&netcp_priv->lock, flags);
+	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
+		if ((next->order     == order) &&
+		    (next->hook_rtn  == hook_rtn) &&
+		    (next->hook_data == hook_data)) {
+			list_del(&next->list);
+			spin_unlock_irqrestore(&netcp_priv->lock, flags);
+			devm_kfree(netcp_priv->dev, next);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
+
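+/* Return an Rx buffer to its allocator: page-fragment buffers (the common
+ * case, keyed off "buf_len <= PAGE_SIZE" by the callers) go back through
+ * skb_free_frag(), anything else through kfree().
+ */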
+static void netcp_frag_free(bool is_frag, void *ptr)
+{
+	if (is_frag)
+		skb_free_frag(ptr);
+	else
+		kfree(ptr);
+}
+
+static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
+				     struct knav_dma_desc *desc)
+{
+	struct knav_dma_desc *ndesc;
+	dma_addr_t dma_desc, dma_buf;
+	unsigned int buf_len, dma_sz = sizeof(*ndesc);
+	void *buf_ptr;
+	u32 tmp;
+
+	get_words(&dma_desc, 1, &desc->next_desc);
+
+	while (dma_desc) {
+		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+		if (unlikely(!ndesc)) {
+			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+			break;
+		}
+		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		buf_ptr = (void *)GET_SW_DATA0(ndesc);
+		buf_len = (int)GET_SW_DATA1(ndesc);
+		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(buf_ptr);
+		knav_pool_desc_put(netcp->rx_pool, ndesc);
+	}
+	/* warning!!!! We are retrieving the virtual ptr in the sw_data
+	 * field as a 32bit value. Will not work on 64bit machines
+	 */
+	buf_ptr = (void *)GET_SW_DATA0(desc);
+	buf_len = (int)GET_SW_DATA1(desc);
+
+	if (buf_ptr)
+		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
+	knav_pool_desc_put(netcp->rx_pool, desc);
+}
+
+static void netcp_empty_rx_queue(struct netcp_intf *netcp)
+{
+	struct netcp_stats *rx_stats = &netcp->stats;
+	struct knav_dma_desc *desc;
+	unsigned int dma_sz;
+	dma_addr_t dma;
+
+	for (;;) {
+		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
+		if (!dma)
+			break;
+
+		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+		if (unlikely(!desc)) {
+			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
+				__func__);
+			rx_stats->rx_errors++;
+			continue;
+		}
+		netcp_free_rx_desc_chain(netcp, desc);
+		rx_stats->rx_dropped++;
+	}
+}
+
+static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
+{
+	struct netcp_stats *rx_stats = &netcp->stats;
+	unsigned int dma_sz, buf_len, org_buf_len;
+	struct knav_dma_desc *desc, *ndesc;
+	unsigned int pkt_sz = 0, accum_sz;
+	struct netcp_hook_list *rx_hook;
+	dma_addr_t dma_desc, dma_buff;
+	struct netcp_packet p_info;
+	struct sk_buff *skb;
+	void *org_buf_ptr;
+	u32 tmp;
+
+	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
+	if (!dma_desc)
+		return -1;
+
+	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+	if (unlikely(!desc)) {
+		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+		return 0;
+	}
+
+	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
+	/* warning!!!! We are retrieving the virtual ptr in the sw_data
+	 * field as a 32bit value. Will not work on 64bit machines
+	 */
+	org_buf_ptr = (void *)GET_SW_DATA0(desc);
+	org_buf_len = (int)GET_SW_DATA1(desc);
+
+	if (unlikely(!org_buf_ptr)) {
+		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+		goto free_desc;
+	}
+
+	/* The first descriptor's desc_info word carries the total packet
+	 * length; fetch it so the sanity check against the accumulated
+	 * fragment sizes below is meaningful.
+	 */
+	get_desc_info(&pkt_sz, &tmp, desc);
+	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
+	accum_sz = buf_len;
+	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
+
+	/* Build a new sk_buff for the primary buffer */
+	skb = build_skb(org_buf_ptr, org_buf_len);
+	if (unlikely(!skb)) {
+		dev_err(netcp->ndev_dev, "build_skb() failed\n");
+		goto free_desc;
+	}
+
+	/* update data, tail and len */
+	skb_reserve(skb, NETCP_SOP_OFFSET);
+	__skb_put(skb, buf_len);
+
+	/* Fill in the page fragment list */
+	while (dma_desc) {
+		struct page *page;
+
+		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+		if (unlikely(!ndesc)) {
+			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+			goto free_desc;
+		}
+
+		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		page = (struct page *)GET_SW_DATA0(ndesc);
+
+		if (likely(dma_buff && buf_len && page)) {
+			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+		} else {
+			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
+				&dma_buff, buf_len, page);
+			goto free_desc;
+		}
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
+		accum_sz += buf_len;
+
+		/* Free the descriptor */
+		knav_pool_desc_put(netcp->rx_pool, ndesc);
+	}
+
+	/* check for packet len and warn */
+	if (unlikely(pkt_sz != accum_sz))
+		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
+			pkt_sz, accum_sz);
+
+	/* Newer version of the Ethernet switch can trim the Ethernet FCS
+	 * from the packet and is indicated in hw_cap. So trim it only for
+	 * older h/w
+	 */
+	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
+		__pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+	/* Call each of the RX hooks */
+	p_info.skb = skb;
+	skb->dev = netcp->ndev;
+	p_info.rxtstamp_complete = false;
+	get_desc_info(&tmp, &p_info.eflags, desc);
+	p_info.epib = desc->epib;
+	p_info.psdata = (u32 __force *)desc->psdata;
+	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
+			 KNAV_DMA_DESC_EFLAGS_MASK);
+	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
+		int ret;
+
+		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
+					&p_info);
+		if (unlikely(ret)) {
+			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
+				rx_hook->order, ret);
+			/* Free the primary descriptor */
+			rx_stats->rx_dropped++;
+			knav_pool_desc_put(netcp->rx_pool, desc);
+			dev_kfree_skb(skb);
+			return 0;
+		}
+	}
+	/* Free the primary descriptor */
+	knav_pool_desc_put(netcp->rx_pool, desc);
+
+	u64_stats_update_begin(&rx_stats->syncp_rx);
+	rx_stats->rx_packets++;
+	rx_stats->rx_bytes += skb->len;
+	u64_stats_update_end(&rx_stats->syncp_rx);
+
+	/* push skb up the stack */
+	skb->protocol = eth_type_trans(skb, netcp->ndev);
+	netif_receive_skb(skb);
+	return 0;
+
+free_desc:
+	netcp_free_rx_desc_chain(netcp, desc);
+	rx_stats->rx_errors++;
+	return 0;
+}
+
+static int netcp_process_rx_packets(struct netcp_intf *netcp,
+				    unsigned int budget)
+{
+	int i;
+
+	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
+		;
+	return i;
+}
+
+/* Release descriptors and attached buffers from Rx FDQ */
+static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+	struct knav_dma_desc *desc;
+	unsigned int buf_len, dma_sz;
+	dma_addr_t dma;
+	void *buf_ptr;
+
+	/* Allocate descriptor */
+	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
+		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+		if (unlikely(!desc)) {
+			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+			continue;
+		}
+
+		get_org_pkt_info(&dma, &buf_len, desc);
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		buf_ptr = (void *)GET_SW_DATA0(desc);
+
+		if (unlikely(!dma)) {
+			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
+			knav_pool_desc_put(netcp->rx_pool, desc);
+			continue;
+		}
+
+		if (unlikely(!buf_ptr)) {
+			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+			knav_pool_desc_put(netcp->rx_pool, desc);
+			continue;
+		}
+
+		if (fdq == 0) {
+			dma_unmap_single(netcp->dev, dma, buf_len,
+					 DMA_FROM_DEVICE);
+			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
+		} else {
+			dma_unmap_page(netcp->dev, dma, buf_len,
+				       DMA_FROM_DEVICE);
+			__free_page(buf_ptr);
+		}
+
+		knav_pool_desc_put(netcp->rx_pool, desc);
+	}
+}
+
+static void netcp_rxpool_free(struct netcp_intf *netcp)
+{
+	int i;
+
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
+		netcp_free_rx_buf(netcp, i);
+
+	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
+		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
+			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
+
+	knav_pool_destroy(netcp->rx_pool);
+	netcp->rx_pool = NULL;
+}
+
+static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+	struct knav_dma_desc *hwdesc;
+	unsigned int buf_len, dma_sz;
+	u32 desc_info, pkt_info;
+	struct page *page;
+	dma_addr_t dma;
+	void *bufptr;
+	u32 sw_data[2];
+
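+	/* FDQ 0 supplies "primary" buffers that become the skb linear area;
+	 * the other FDQs supply whole pages that are attached as fragments
+	 * in netcp_process_one_rx_packet().
+	 */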
+	/* Allocate descriptor */
+	hwdesc = knav_pool_desc_get(netcp->rx_pool);
+	if (IS_ERR_OR_NULL(hwdesc)) {
+		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
+		return -ENOMEM;
+	}
+
+	if (likely(fdq == 0)) {
+		unsigned int primary_buf_len;
+		/* Allocate a primary receive queue entry */
+		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
+		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
+				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+		bufptr = netdev_alloc_frag(primary_buf_len);
+		sw_data[1] = primary_buf_len;
+
+		if (unlikely(!bufptr)) {
+			dev_warn_ratelimited(netcp->ndev_dev,
+					     "Primary RX buffer alloc failed\n");
+			goto fail;
+		}
+		dma = dma_map_single(netcp->dev, bufptr, buf_len,
+				     DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(netcp->dev, dma))) {
+			skb_free_frag(bufptr);
+			goto fail;
+		}
+
+		/* warning!!!! We are saving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		sw_data[0] = (u32)bufptr;
+	} else {
+		/* Allocate a secondary receive queue entry */
+		page = alloc_page(GFP_ATOMIC | GFP_DMA);
+		if (unlikely(!page)) {
+			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
+			goto fail;
+		}
+		buf_len = PAGE_SIZE;
+		dma = dma_map_page(netcp->dev, page, 0, buf_len,
+				   DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(netcp->dev, dma))) {
+			__free_page(page);
+			goto fail;
+		}
+		/* warning!!!! We are saving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		sw_data[0] = (u32)page;
+		sw_data[1] = 0;
+	}
+
+	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
+	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
+	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
+	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
+	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
+		    KNAV_DMA_DESC_RETQ_SHIFT;
+	set_org_pkt_info(dma, buf_len, hwdesc);
+	SET_SW_DATA0(sw_data[0], hwdesc);
+	SET_SW_DATA1(sw_data[1], hwdesc);
+	set_desc_info(desc_info, pkt_info, hwdesc);
+
+	/* Push to FDQs */
+	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
+			   &dma_sz);
+	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
+	return 0;
+
+fail:
+	knav_pool_desc_put(netcp->rx_pool, hwdesc);
+	return -ENOMEM;
+}
+
+/* Refill Rx FDQ with descriptors & attached buffers */
+static void netcp_rxpool_refill(struct netcp_intf *netcp)
+{
+	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
+	int i, ret = 0;
+
+	/* Calculate the FDQ deficit and refill */
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
+		fdq_deficit[i] = netcp->rx_queue_depths[i] -
+				 knav_queue_get_count(netcp->rx_fdq[i]);
+
+		while (fdq_deficit[i]-- && !ret)
+			ret = netcp_allocate_rx_buf(netcp, i);
+	} /* end for fdqs */
+}
+
+/* NAPI poll */
+static int netcp_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+						rx_napi);
+	unsigned int packets;
+
+	packets = netcp_process_rx_packets(netcp, budget);
+
+	netcp_rxpool_refill(netcp);
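+	/* Under budget means the Rx queue was drained: finish NAPI and
+	 * re-arm the queue notifier that netcp_rx_notify() disabled.
+	 */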
+	if (packets < budget) {
+		napi_complete_done(&netcp->rx_napi, packets);
+		knav_queue_enable_notify(netcp->rx_queue);
+	}
+
+	return packets;
+}
+
+static void netcp_rx_notify(void *arg)
+{
+	struct netcp_intf *netcp = arg;
+
+	knav_queue_disable_notify(netcp->rx_queue);
+	napi_schedule(&netcp->rx_napi);
+}
+
+static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
+				     struct knav_dma_desc *desc,
+				     unsigned int desc_sz)
+{
+	struct knav_dma_desc *ndesc = desc;
+	dma_addr_t dma_desc, dma_buf;
+	unsigned int buf_len;
+
+	while (ndesc) {
+		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
+
+		if (dma_buf && buf_len)
+			dma_unmap_single(netcp->dev, dma_buf, buf_len,
+					 DMA_TO_DEVICE);
+		else
+			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
+				 &dma_buf, buf_len);
+
+		knav_pool_desc_put(netcp->tx_pool, ndesc);
+		ndesc = NULL;
+		if (dma_desc) {
+			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
+						     desc_sz);
+			if (!ndesc)
+				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+		}
+	}
+}
+
+static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
+					  unsigned int budget)
+{
+	struct netcp_stats *tx_stats = &netcp->stats;
+	struct knav_dma_desc *desc;
+	struct netcp_tx_cb *tx_cb;
+	struct sk_buff *skb;
+	unsigned int dma_sz;
+	dma_addr_t dma;
+	int pkts = 0;
+
+	while (budget--) {
+		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
+		if (!dma)
+			break;
+		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
+		if (unlikely(!desc)) {
+			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+			tx_stats->tx_errors++;
+			continue;
+		}
+
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		skb = (struct sk_buff *)GET_SW_DATA0(desc);
+		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
+		if (!skb) {
+			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
+			tx_stats->tx_errors++;
+			continue;
+		}
+
+		tx_cb = (struct netcp_tx_cb *)skb->cb;
+		if (tx_cb->txtstamp)
+			tx_cb->txtstamp(tx_cb->ts_context, skb);
+
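+		/* Wake the subqueue once enough descriptors are back in the
+		 * pool; pairs with the pause check in netcp_ndo_start_xmit().
+		 */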
+		if (netif_subqueue_stopped(netcp->ndev, skb) &&
+		    netif_running(netcp->ndev) &&
+		    (knav_pool_count(netcp->tx_pool) >
+		    netcp->tx_resume_threshold)) {
+			u16 subqueue = skb_get_queue_mapping(skb);
+
+			netif_wake_subqueue(netcp->ndev, subqueue);
+		}
+
+		u64_stats_update_begin(&tx_stats->syncp_tx);
+		tx_stats->tx_packets++;
+		tx_stats->tx_bytes += skb->len;
+		u64_stats_update_end(&tx_stats->syncp_tx);
+		dev_kfree_skb(skb);
+		pkts++;
+	}
+	return pkts;
+}
+
+static int netcp_tx_poll(struct napi_struct *napi, int budget)
+{
+	int packets;
+	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+						tx_napi);
+
+	packets = netcp_process_tx_compl_packets(netcp, budget);
+	if (packets < budget) {
+		napi_complete(&netcp->tx_napi);
+		knav_queue_enable_notify(netcp->tx_compl_q);
+	}
+
+	return packets;
+}
+
+static void netcp_tx_notify(void *arg)
+{
+	struct netcp_intf *netcp = arg;
+
+	knav_queue_disable_notify(netcp->tx_compl_q);
+	napi_schedule(&netcp->tx_napi);
+}
+
+static struct knav_dma_desc*
+netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
+{
+	struct knav_dma_desc *desc, *ndesc, *pdesc;
+	unsigned int pkt_len = skb_headlen(skb);
+	struct device *dev = netcp->dev;
+	dma_addr_t dma_addr;
+	unsigned int dma_sz;
+	int i;
+
+	/* Map the linear buffer */
+	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, dma_addr))) {
+		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
+		return NULL;
+	}
+
+	desc = knav_pool_desc_get(netcp->tx_pool);
+	if (IS_ERR_OR_NULL(desc)) {
+		dev_err(netcp->ndev_dev, "out of TX desc\n");
+		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
+		return NULL;
+	}
+
+	set_pkt_info(dma_addr, pkt_len, 0, desc);
+	if (skb_is_nonlinear(skb)) {
+		prefetchw(skb_shinfo(skb));
+	} else {
+		desc->next_desc = 0;
+		goto upd_pkt_len;
+	}
+
+	pdesc = desc;
+
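+	/* Each fragment gets its own descriptor, linked to its predecessor
+	 * via next_desc; a descriptor is only mapped out to the pool once
+	 * its next_desc link is final.
+	 */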
+	/* Handle the case where skb is fragmented in pages */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		struct page *page = skb_frag_page(frag);
+		u32 page_offset = frag->page_offset;
+		u32 buf_len = skb_frag_size(frag);
+		dma_addr_t desc_dma;
+		u32 desc_dma_32;
+
+		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
+					DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, dma_addr))) {
+			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
+			goto free_descs;
+		}
+
+		ndesc = knav_pool_desc_get(netcp->tx_pool);
+		if (IS_ERR_OR_NULL(ndesc)) {
+			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
+			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
+			goto free_descs;
+		}
+
+		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
+		set_pkt_info(dma_addr, buf_len, 0, ndesc);
+		desc_dma_32 = (u32)desc_dma;
+		set_words(&desc_dma_32, 1, &pdesc->next_desc);
+		pkt_len += buf_len;
+		if (pdesc != desc)
+			knav_pool_desc_map(netcp->tx_pool, pdesc,
+					   sizeof(*pdesc), &desc_dma, &dma_sz);
+		pdesc = ndesc;
+	}
+	if (pdesc != desc)
+		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
+				   &dma_addr, &dma_sz);
+
+	/* frag list based linkage is not supported for now. */
+	if (skb_shinfo(skb)->frag_list) {
+		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
+		goto free_descs;
+	}
+
+upd_pkt_len:
+	WARN_ON(pkt_len != skb->len);
+
+	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
+	set_words(&pkt_len, 1, &desc->desc_info);
+	return desc;
+
+free_descs:
+	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+	return NULL;
+}
+
+static int netcp_tx_submit_skb(struct netcp_intf *netcp,
+			       struct sk_buff *skb,
+			       struct knav_dma_desc *desc)
+{
+	struct netcp_tx_pipe *tx_pipe = NULL;
+	struct netcp_hook_list *tx_hook;
+	struct netcp_packet p_info;
+	struct netcp_tx_cb *tx_cb;
+	unsigned int dma_sz;
+	dma_addr_t dma;
+	u32 tmp = 0;
+	int ret = 0;
+
+	p_info.netcp = netcp;
+	p_info.skb = skb;
+	p_info.tx_pipe = NULL;
+	p_info.psdata_len = 0;
+	p_info.ts_context = NULL;
+	p_info.txtstamp = NULL;
+	p_info.epib = desc->epib;
+	p_info.psdata = (u32 __force *)desc->psdata;
+	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));
+
+	/* Find out where to inject the packet for transmission */
+	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
+		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
+					&p_info);
+		if (unlikely(ret != 0)) {
+			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
+				tx_hook->order, ret);
+			ret = (ret < 0) ? ret : NETDEV_TX_OK;
+			goto out;
+		}
+	}
+
+	/* Make sure some TX hook claimed the packet */
+	tx_pipe = p_info.tx_pipe;
+	if (!tx_pipe) {
+		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	tx_cb = (struct netcp_tx_cb *)skb->cb;
+	tx_cb->ts_context = p_info.ts_context;
+	tx_cb->txtstamp = p_info.txtstamp;
+
+	/* update descriptor */
+	if (p_info.psdata_len) {
+		/* psdata points to both native-endian and device-endian data */
+		__le32 *psdata = (void __force *)p_info.psdata;
+
+		set_words((u32 *)psdata +
+			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
+			  p_info.psdata_len, psdata);
+		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
+			KNAV_DMA_DESC_PSLEN_SHIFT;
+	}
+
+	tmp |= KNAV_DMA_DESC_HAS_EPIB |
+		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
+		KNAV_DMA_DESC_RETQ_SHIFT);
+
+	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
+		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
+			KNAV_DMA_DESC_PSFLAG_SHIFT);
+	}
+
+	set_words(&tmp, 1, &desc->packet_info);
+	/* warning!!!! We are saving the virtual ptr in the sw_data
+	 * field as a 32bit value. Will not work on 64bit machines
+	 */
+	SET_SW_DATA0((u32)skb, desc);
+
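+	/* The destination switch port travels either in the descriptor's
+	 * tag_info word or in the PS flags set above, depending on how the
+	 * Tx pipe was configured by the Ethernet subsystem module.
+	 */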
+	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
+		tmp = tx_pipe->switch_to_port;
+		set_words(&tmp, 1, &desc->tag_info);
+	}
+
+	/* submit packet descriptor */
+	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
+				 &dma_sz);
+	if (unlikely(ret)) {
+		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+	skb_tx_timestamp(skb);
+	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
+
+out:
+	return ret;
+}
+
+/* Submit the packet */
+static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_stats *tx_stats = &netcp->stats;
+	int subqueue = skb_get_queue_mapping(skb);
+	struct knav_dma_desc *desc;
+	int desc_count, ret = 0;
+
+	if (unlikely(skb->len <= 0)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
+		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
+		if (ret < 0) {
+			/* If we get here, the skb has already been dropped */
+			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
+				 ret);
+			tx_stats->tx_dropped++;
+			return ret;
+		}
+		skb->len = NETCP_MIN_PACKET_SIZE;
+	}
+
+	desc = netcp_tx_map_skb(skb, netcp);
+	if (unlikely(!desc)) {
+		netif_stop_subqueue(ndev, subqueue);
+		ret = -ENOBUFS;
+		goto drop;
+	}
+
+	ret = netcp_tx_submit_skb(netcp, skb, desc);
+	if (ret)
+		goto drop;
+
+	/* Check Tx pool count & stop subqueue if needed */
+	desc_count = knav_pool_count(netcp->tx_pool);
+	if (desc_count < netcp->tx_pause_threshold) {
+		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
+		netif_stop_subqueue(ndev, subqueue);
+	}
+	return NETDEV_TX_OK;
+
+drop:
+	tx_stats->tx_dropped++;
+	if (desc)
+		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+	dev_kfree_skb(skb);
+	return ret;
+}
+
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
+{
+	if (tx_pipe->dma_channel) {
+		knav_dma_close_channel(tx_pipe->dma_channel);
+		tx_pipe->dma_channel = NULL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_close);
+
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
+{
+	struct device *dev = tx_pipe->netcp_device->device;
+	struct knav_dma_cfg config;
+	int ret = 0;
+	u8 name[16];
+
+	memset(&config, 0, sizeof(config));
+	config.direction = DMA_MEM_TO_DEV;
+	config.u.tx.filt_einfo = false;
+	config.u.tx.filt_pswords = false;
+	config.u.tx.priority = DMA_PRIO_MED_L;
+
+	tx_pipe->dma_channel = knav_dma_open_channel(dev,
+				tx_pipe->dma_chan_name, &config);
+	if (IS_ERR(tx_pipe->dma_channel)) {
+		dev_err(dev, "failed opening tx chan(%s)\n",
+			tx_pipe->dma_chan_name);
+		ret = PTR_ERR(tx_pipe->dma_channel);
+		goto err;
+	}
+
+	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
+	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
+					     KNAV_QUEUE_SHARED);
+	if (IS_ERR(tx_pipe->dma_queue)) {
+		ret = PTR_ERR(tx_pipe->dma_queue);
+		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
+			name, ret);
+		goto err;
+	}
+
+	dev_dbg(dev, "opened tx pipe %s\n", name);
+	return 0;
+
+err:
+	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+		knav_dma_close_channel(tx_pipe->dma_channel);
+	tx_pipe->dma_channel = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_open);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+		      struct netcp_device *netcp_device,
+		      const char *dma_chan_name, unsigned int dma_queue_id)
+{
+	memset(tx_pipe, 0, sizeof(*tx_pipe));
+	tx_pipe->netcp_device = netcp_device;
+	tx_pipe->dma_chan_name = dma_chan_name;
+	tx_pipe->dma_queue_id = dma_queue_id;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_init);
+
+static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
+					  const u8 *addr,
+					  enum netcp_addr_type type)
+{
+	struct netcp_addr *naddr;
+
+	list_for_each_entry(naddr, &netcp->addr_list, node) {
+		if (naddr->type != type)
+			continue;
+		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
+			continue;
+		return naddr;
+	}
+
+	return NULL;
+}
+
+static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
+					 const u8 *addr,
+					 enum netcp_addr_type type)
+{
+	struct netcp_addr *naddr;
+
+	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
+	if (!naddr)
+		return NULL;
+
+	naddr->type = type;
+	naddr->flags = 0;
+	naddr->netcp = netcp;
+	if (addr)
+		ether_addr_copy(naddr->addr, addr);
+	else
+		eth_zero_addr(naddr->addr);
+	list_add_tail(&naddr->node, &netcp->addr_list);
+
+	return naddr;
+}
+
+static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
+{
+	list_del(&naddr->node);
+	devm_kfree(netcp->dev, naddr);
+}
+
+static void netcp_addr_clear_mark(struct netcp_intf *netcp)
+{
+	struct netcp_addr *naddr;
+
+	list_for_each_entry(naddr, &netcp->addr_list, node)
+		naddr->flags = 0;
+}
+
+static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
+				enum netcp_addr_type type)
+{
+	struct netcp_addr *naddr;
+
+	naddr = netcp_addr_find(netcp, addr, type);
+	if (naddr) {
+		naddr->flags |= ADDR_VALID;
+		return;
+	}
+
+	naddr = netcp_addr_add(netcp, addr, type);
+	if (!WARN_ON(!naddr))
+		naddr->flags |= ADDR_NEW;
+}
+
+static void netcp_addr_sweep_del(struct netcp_intf *netcp)
+{
+	struct netcp_addr *naddr, *tmp;
+	struct netcp_intf_modpriv *priv;
+	struct netcp_module *module;
+	int error;
+
+	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
+			continue;
+		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
+			naddr->addr, naddr->type);
+		for_each_module(netcp, priv) {
+			module = priv->netcp_module;
+			if (!module->del_addr)
+				continue;
+			error = module->del_addr(priv->module_priv,
+						 naddr);
+			WARN_ON(error);
+		}
+		netcp_addr_del(netcp, naddr);
+	}
+}
+
+static void netcp_addr_sweep_add(struct netcp_intf *netcp)
+{
+	struct netcp_addr *naddr, *tmp;
+	struct netcp_intf_modpriv *priv;
+	struct netcp_module *module;
+	int error;
+
+	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+		if (!(naddr->flags & ADDR_NEW))
+			continue;
+		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
+			naddr->addr, naddr->type);
+
+		for_each_module(netcp, priv) {
+			module = priv->netcp_module;
+			if (!module->add_addr)
+				continue;
+			error = module->add_addr(priv->module_priv, naddr);
+			WARN_ON(error);
+		}
+	}
+}
+
+static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
+{
+	struct netcp_intf_modpriv *priv;
+	struct netcp_module *module;
+	int error;
+
+	for_each_module(netcp, priv) {
+		module = priv->netcp_module;
+		if (!module->set_rx_mode)
+			continue;
+
+		error = module->set_rx_mode(priv->module_priv, promisc);
+		if (error)
+			return error;
+	}
+	return 0;
+}
+
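+/* Synchronize the hardware address tables with the net_device address
+ * lists by mark-and-sweep: clear all marks, mark every address that must
+ * stay (adding missing ones as ADDR_NEW), then delete what is left
+ * unmarked and push the new entries out to the modules.
+ */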
+static void netcp_set_rx_mode(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netdev_hw_addr *ndev_addr;
+	bool promisc;
+
+	promisc = (ndev->flags & IFF_PROMISC ||
+		   ndev->flags & IFF_ALLMULTI ||
+		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
+
+	spin_lock(&netcp->lock);
+	/* first clear all marks */
+	netcp_addr_clear_mark(netcp);
+
+	/* next add new entries, mark existing ones */
+	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
+	for_each_dev_addr(ndev, ndev_addr)
+		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
+	netdev_for_each_uc_addr(ndev_addr, ndev)
+		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
+	netdev_for_each_mc_addr(ndev_addr, ndev)
+		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
+
+	if (promisc)
+		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
+
+	/* finally sweep and callout into modules */
+	netcp_addr_sweep_del(netcp);
+	netcp_addr_sweep_add(netcp);
+	netcp_set_promiscuous(netcp, promisc);
+	spin_unlock(&netcp->lock);
+}
+
+static void netcp_free_navigator_resources(struct netcp_intf *netcp)
+{
+	int i;
+
+	if (netcp->rx_channel) {
+		knav_dma_close_channel(netcp->rx_channel);
+		netcp->rx_channel = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(netcp->rx_pool))
+		netcp_rxpool_free(netcp);
+
+	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
+		knav_queue_close(netcp->rx_queue);
+		netcp->rx_queue = NULL;
+	}
+
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); ++i) {
+		knav_queue_close(netcp->rx_fdq[i]);
+		netcp->rx_fdq[i] = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+		knav_queue_close(netcp->tx_compl_q);
+		netcp->tx_compl_q = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
+		knav_pool_destroy(netcp->tx_pool);
+		netcp->tx_pool = NULL;
+	}
+}
+
+static int netcp_setup_navigator_resources(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct knav_queue_notify_config notify_cfg;
+	struct knav_dma_cfg config;
+	u32 last_fdq = 0;
+	u8 name[16];
+	int ret;
+	int i;
+
+	/* Create Rx/Tx descriptor pools */
+	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
+	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
+						netcp->rx_pool_region_id);
+	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
+		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
+		ret = PTR_ERR(netcp->rx_pool);
+		goto fail;
+	}
+
+	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
+	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
+						netcp->tx_pool_region_id);
+	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
+		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
+		ret = PTR_ERR(netcp->tx_pool);
+		goto fail;
+	}
+
+	/* open Tx completion queue */
+	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
+	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
+	if (IS_ERR(netcp->tx_compl_q)) {
+		ret = PTR_ERR(netcp->tx_compl_q);
+		goto fail;
+	}
+	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
+
+	/* Set notification for Tx completion */
+	notify_cfg.fn = netcp_tx_notify;
+	notify_cfg.fn_arg = netcp;
+	ret = knav_queue_device_control(netcp->tx_compl_q,
+					KNAV_QUEUE_SET_NOTIFIER,
+					(unsigned long)&notify_cfg);
+	if (ret)
+		goto fail;
+
+	knav_queue_disable_notify(netcp->tx_compl_q);
+
+	/* open Rx completion queue */
+	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
+	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
+	if (IS_ERR(netcp->rx_queue)) {
+		ret = PTR_ERR(netcp->rx_queue);
+		goto fail;
+	}
+	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
+
+	/* Set notification for Rx completion */
+	notify_cfg.fn = netcp_rx_notify;
+	notify_cfg.fn_arg = netcp;
+	ret = knav_queue_device_control(netcp->rx_queue,
+					KNAV_QUEUE_SET_NOTIFIER,
+					(unsigned long)&notify_cfg);
+	if (ret)
+		goto fail;
+
+	knav_queue_disable_notify(netcp->rx_queue);
+
+	/* open Rx FDQs */
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+	     ++i) {
+		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
+		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+		if (IS_ERR(netcp->rx_fdq[i])) {
+			ret = PTR_ERR(netcp->rx_fdq[i]);
+			goto fail;
+		}
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.direction		= DMA_DEV_TO_MEM;
+	config.u.rx.einfo_present	= true;
+	config.u.rx.psinfo_present	= true;
+	config.u.rx.err_mode		= DMA_DROP;
+	config.u.rx.desc_type		= DMA_DESC_HOST;
+	config.u.rx.psinfo_at_sop	= false;
+	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
+	config.u.rx.dst_q		= netcp->rx_queue_id;
+	config.u.rx.thresh		= DMA_THRESH_NONE;
+
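+	/* If fewer FDQs were opened than the channel supports, the trailing
+	 * slots repeat the id of the last FDQ that was opened.
+	 */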
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
+		if (netcp->rx_fdq[i])
+			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
+		config.u.rx.fdq[i] = last_fdq;
+	}
+
+	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
+					netcp->dma_chan_name, &config);
+	if (IS_ERR(netcp->rx_channel)) {
+		dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
+			netcp->dma_chan_name);
+		ret = PTR_ERR(netcp->rx_channel);
+		goto fail;
+	}
+
+	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
+	return 0;
+
+fail:
+	netcp_free_navigator_resources(netcp);
+	return ret;
+}
+
+/* Open the device */
+static int netcp_ndo_open(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_intf_modpriv *intf_modpriv;
+	struct netcp_module *module;
+	int ret;
+
+	netif_carrier_off(ndev);
+	ret = netcp_setup_navigator_resources(ndev);
+	if (ret) {
+		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
+		goto fail;
+	}
+
+	for_each_module(netcp, intf_modpriv) {
+		module = intf_modpriv->netcp_module;
+		if (module->open) {
+			ret = module->open(intf_modpriv->module_priv, ndev);
+			if (ret != 0) {
+				dev_err(netcp->ndev_dev, "module open failed\n");
+				goto fail_open;
+			}
+		}
+	}
+
+	napi_enable(&netcp->rx_napi);
+	napi_enable(&netcp->tx_napi);
+	knav_queue_enable_notify(netcp->tx_compl_q);
+	knav_queue_enable_notify(netcp->rx_queue);
+	netcp_rxpool_refill(netcp);
+	netif_tx_wake_all_queues(ndev);
+	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
+	return 0;
+
+fail_open:
+	for_each_module(netcp, intf_modpriv) {
+		module = intf_modpriv->netcp_module;
+		if (module->close)
+			module->close(intf_modpriv->module_priv, ndev);
+	}
+
+fail:
+	netcp_free_navigator_resources(netcp);
+	return ret;
+}
+
+/* Close the device */
+static int netcp_ndo_stop(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_intf_modpriv *intf_modpriv;
+	struct netcp_module *module;
+	int err = 0;
+
+	netif_tx_stop_all_queues(ndev);
+	netif_carrier_off(ndev);
+	netcp_addr_clear_mark(netcp);
+	netcp_addr_sweep_del(netcp);
+	knav_queue_disable_notify(netcp->rx_queue);
+	knav_queue_disable_notify(netcp->tx_compl_q);
+	napi_disable(&netcp->rx_napi);
+	napi_disable(&netcp->tx_napi);
+
+	for_each_module(netcp, intf_modpriv) {
+		module = intf_modpriv->netcp_module;
+		if (module->close) {
+			err = module->close(intf_modpriv->module_priv, ndev);
+			if (err != 0)
+				dev_err(netcp->ndev_dev, "Close failed\n");
+		}
+	}
+
+	/* Recycle Rx descriptors from completion queue */
+	netcp_empty_rx_queue(netcp);
+
+	/* Recycle Tx descriptors from completion queue */
+	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+
+	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
+		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
+			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
+
+	netcp_free_navigator_resources(netcp);
+	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
+	return 0;
+}
+
+static int netcp_ndo_ioctl(struct net_device *ndev,
+			   struct ifreq *req, int cmd)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_intf_modpriv *intf_modpriv;
+	struct netcp_module *module;
+	int ret = -1, err = -EOPNOTSUPP;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
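+	/* Offer the command to every module: abort on a real error, succeed
+	 * if any module handled it, otherwise fall back to -EOPNOTSUPP.
+	 */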
+	for_each_module(netcp, intf_modpriv) {
+		module = intf_modpriv->netcp_module;
+		if (!module->ioctl)
+			continue;
+
+		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
+		if ((err < 0) && (err != -EOPNOTSUPP)) {
+			ret = err;
+			goto out;
+		}
+		if (err == 0)
+			ret = err;
+	}
+
+out:
+	return (ret == 0) ? 0 : err;
+}
+
+static void netcp_ndo_tx_timeout(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	unsigned int descs = knav_pool_count(netcp->tx_pool);
+
+	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
+	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+	netif_trans_update(ndev);
+	netif_tx_wake_all_queues(ndev);
+}
+
+static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_intf_modpriv *intf_modpriv;
+	struct netcp_module *module;
+	unsigned long flags;
+	int err = 0;
+
+	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
+
+	spin_lock_irqsave(&netcp->lock, flags);
+	for_each_module(netcp, intf_modpriv) {
+		module = intf_modpriv->netcp_module;
+		if ((module->add_vid) && (vid != 0)) {
+			err = module->add_vid(intf_modpriv->module_priv, vid);
+			if (err != 0) {
+				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
+					vid);
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&netcp->lock, flags);
+
+	return err;
+}
+
+static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_intf_modpriv *intf_modpriv;
+	struct netcp_module *module;
+	unsigned long flags;
+	int err = 0;
+
+	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
+
+	spin_lock_irqsave(&netcp->lock, flags);
+	for_each_module(netcp, intf_modpriv) {
+		module = intf_modpriv->netcp_module;
+		if (module->del_vid) {
+			err = module->del_vid(intf_modpriv->module_priv, vid);
+			if (err != 0) {
+				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
+					vid);
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&netcp->lock, flags);
+	return err;
+}
+
+static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			  void *type_data)
+{
+	struct tc_mqprio_qopt *mqprio = type_data;
+	u8 num_tc;
+	int i;
+
+	/* setup tc must be called under rtnl lock */
+	ASSERT_RTNL();
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EOPNOTSUPP;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	num_tc = mqprio->num_tc;
+
+	/* Sanity-check the number of traffic classes requested */
+	if ((dev->real_num_tx_queues <= 1) ||
+	    (dev->real_num_tx_queues < num_tc))
+		return -EINVAL;
+
+	/* Configure traffic class to queue mappings */
+	if (num_tc) {
+		netdev_set_num_tc(dev, num_tc);
+		for (i = 0; i < num_tc; i++)
+			netdev_set_tc_queue(dev, i, 1, i);
+	} else {
+		netdev_reset_tc(dev);
+	}
+
+	return 0;
+}
+
+static void
+netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_stats *p = &netcp->stats;
+	u64 rxpackets, rxbytes, txpackets, txbytes;
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
+		rxpackets       = p->rx_packets;
+		rxbytes         = p->rx_bytes;
+	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
+
+	do {
+		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
+		txpackets       = p->tx_packets;
+		txbytes         = p->tx_bytes;
+	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
+
+	stats->rx_packets = rxpackets;
+	stats->rx_bytes = rxbytes;
+	stats->tx_packets = txpackets;
+	stats->tx_bytes = txbytes;
+
+	/* The following are stored as 32 bit */
+	stats->rx_errors = p->rx_errors;
+	stats->rx_dropped = p->rx_dropped;
+	stats->tx_dropped = p->tx_dropped;
+}
+
+static const struct net_device_ops netcp_netdev_ops = {
+	.ndo_open		= netcp_ndo_open,
+	.ndo_stop		= netcp_ndo_stop,
+	.ndo_start_xmit		= netcp_ndo_start_xmit,
+	.ndo_set_rx_mode	= netcp_set_rx_mode,
+	.ndo_do_ioctl           = netcp_ndo_ioctl,
+	.ndo_get_stats64        = netcp_get_stats,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
+	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
+	.ndo_select_queue	= dev_pick_tx_zero,
+	.ndo_setup_tc		= netcp_setup_tc,
+};
+
+static int netcp_create_interface(struct netcp_device *netcp_device,
+				  struct device_node *node_interface)
+{
+	struct device *dev = netcp_device->device;
+	struct device_node *node = dev->of_node;
+	struct netcp_intf *netcp;
+	struct net_device *ndev;
+	resource_size_t size;
+	struct resource res;
+	void __iomem *efuse = NULL;
+	u32 efuse_mac = 0;
+	const void *mac_addr;
+	u8 efuse_mac_addr[6];
+	u32 temp[2];
+	int ret = 0;
+
+	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
+	if (!ndev) {
+		dev_err(dev, "Error allocating netdev\n");
+		return -ENOMEM;
+	}
+
+	ndev->features |= NETIF_F_SG;
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	ndev->hw_features = ndev->features;
+	ndev->vlan_features |=  NETIF_F_SG;
+
+	/* MTU range: 68 - 9486 */
+	ndev->min_mtu = ETH_MIN_MTU;
+	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
+	netcp = netdev_priv(ndev);
+	spin_lock_init(&netcp->lock);
+	INIT_LIST_HEAD(&netcp->module_head);
+	INIT_LIST_HEAD(&netcp->txhook_list_head);
+	INIT_LIST_HEAD(&netcp->rxhook_list_head);
+	INIT_LIST_HEAD(&netcp->addr_list);
+	u64_stats_init(&netcp->stats.syncp_rx);
+	u64_stats_init(&netcp->stats.syncp_tx);
+	netcp->netcp_device = netcp_device;
+	netcp->dev = netcp_device->device;
+	netcp->ndev = ndev;
+	netcp->ndev_dev  = &ndev->dev;
+	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
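+	/* Stop a Tx subqueue when the free descriptor count drops below the
+	 * pause threshold; the completion path wakes it once the count rises
+	 * past the resume threshold.
+	 */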
+	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
+	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
+	netcp->node_interface = node_interface;
+
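+	/* The MAC address comes either from an e-fuse register block (the
+	 * "efuse-mac" property) or from the interface node's mac-address
+	 * properties, with a random address as the fallback.
+	 */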
+	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
+	if (efuse_mac) {
+		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
+			dev_err(dev, "could not find efuse-mac reg resource\n");
+			ret = -ENODEV;
+			goto quit;
+		}
+		size = resource_size(&res);
+
+		if (!devm_request_mem_region(dev, res.start, size,
+					     dev_name(dev))) {
+			dev_err(dev, "could not reserve resource\n");
+			ret = -ENOMEM;
+			goto quit;
+		}
+
+		efuse = devm_ioremap_nocache(dev, res.start, size);
+		if (!efuse) {
+			dev_err(dev, "could not map resource\n");
+			devm_release_mem_region(dev, res.start, size);
+			ret = -ENOMEM;
+			goto quit;
+		}
+
+		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
+		if (is_valid_ether_addr(efuse_mac_addr))
+			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
+		else
+			eth_random_addr(ndev->dev_addr);
+
+		devm_iounmap(dev, efuse);
+		devm_release_mem_region(dev, res.start, size);
+	} else {
+		mac_addr = of_get_mac_address(node_interface);
+		if (mac_addr)
+			ether_addr_copy(ndev->dev_addr, mac_addr);
+		else
+			eth_random_addr(ndev->dev_addr);
+	}
+
+	ret = of_property_read_string(node_interface, "rx-channel",
+				      &netcp->dma_chan_name);
+	if (ret < 0) {
+		dev_err(dev, "missing \"rx-channel\" parameter\n");
+		ret = -ENODEV;
+		goto quit;
+	}
+
+	ret = of_property_read_u32(node_interface, "rx-queue",
+				   &netcp->rx_queue_id);
+	if (ret < 0) {
+		dev_warn(dev, "missing \"rx-queue\" parameter\n");
+		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
+	}
+
+	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
+					 netcp->rx_queue_depths,
+					 KNAV_DMA_FDQ_PER_CHAN);
+	if (ret < 0) {
+		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
+		netcp->rx_queue_depths[0] = 128;
+	}
+
+	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
+	if (ret < 0) {
+		dev_err(dev, "missing \"rx-pool\" parameter\n");
+		ret = -ENODEV;
+		goto quit;
+	}
+	netcp->rx_pool_size = temp[0];
+	netcp->rx_pool_region_id = temp[1];
+
+	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
+	if (ret < 0) {
+		dev_err(dev, "missing \"tx-pool\" parameter\n");
+		ret = -ENODEV;
+		goto quit;
+	}
+	netcp->tx_pool_size = temp[0];
+	netcp->tx_pool_region_id = temp[1];
+
+	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
+		dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+			MAX_SKB_FRAGS);
+		ret = -ENODEV;
+		goto quit;
+	}
+
+	ret = of_property_read_u32(node_interface, "tx-completion-queue",
+				   &netcp->tx_compl_qid);
+	if (ret < 0) {
+		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
+		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
+	}
+
+	/* NAPI register */
+	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
+	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
+
+	/* Register the network device */
+	ndev->dev_id		= 0;
+	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
+	ndev->netdev_ops	= &netcp_netdev_ops;
+	SET_NETDEV_DEV(ndev, dev);
+
+	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
+	return 0;
+
+quit:
+	free_netdev(ndev);
+	return ret;
+}
+
+static void netcp_delete_interface(struct netcp_device *netcp_device,
+				   struct net_device *ndev)
+{
+	struct netcp_intf_modpriv *intf_modpriv, *tmp;
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_module *module;
+
+	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
+		ndev->name);
+
+	/* Notify each of the modules that the interface is going away */
+	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
+				 intf_list) {
+		module = intf_modpriv->netcp_module;
+		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
+			module->name);
+		if (module->release)
+			module->release(intf_modpriv->module_priv);
+		list_del(&intf_modpriv->intf_list);
+	}
+	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
+	     ndev->name);
+
+	list_del(&netcp->interface_list);
+
+	of_node_put(netcp->node_interface);
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+}
+
+static int netcp_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct netcp_intf *netcp_intf, *netcp_tmp;
+	struct device_node *child, *interfaces;
+	struct netcp_device *netcp_device;
+	struct device *dev = &pdev->dev;
+	struct netcp_module *module;
+	int ret;
+
+	if (!knav_dma_device_ready() ||
+	    !knav_qmss_device_ready())
+		return -EPROBE_DEFER;
+
+	if (!node) {
+		dev_err(dev, "could not find device info\n");
+		return -ENODEV;
+	}
+
+	/* Allocate a new NETCP device instance */
+	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
+	if (!netcp_device)
+		return -ENOMEM;
+
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable NETCP power-domain\n");
+		pm_runtime_disable(&pdev->dev);
+		return ret;
+	}
+
+	/* Initialize the NETCP device instance */
+	INIT_LIST_HEAD(&netcp_device->interface_head);
+	INIT_LIST_HEAD(&netcp_device->modpriv_head);
+	netcp_device->device = dev;
+	platform_set_drvdata(pdev, netcp_device);
+
+	/* create interfaces */
+	interfaces = of_get_child_by_name(node, "netcp-interfaces");
+	if (!interfaces) {
+		dev_err(dev, "could not find netcp-interfaces node\n");
+		ret = -ENODEV;
+		goto probe_quit;
+	}
+
+	for_each_available_child_of_node(interfaces, child) {
+		ret = netcp_create_interface(netcp_device, child);
+		if (ret) {
+			dev_err(dev, "could not create interface(%s)\n",
+				child->name);
+			goto probe_quit_interface;
+		}
+	}
+
+	of_node_put(interfaces);
+
+	/* Add the device instance to the list */
+	list_add_tail(&netcp_device->device_list, &netcp_devices);
+
+	/* Probe & attach any modules already registered */
+	mutex_lock(&netcp_modules_lock);
+	for_each_netcp_module(module) {
+		ret = netcp_module_probe(netcp_device, module);
+		if (ret < 0)
+			dev_err(dev, "module(%s) probe failed\n", module->name);
+	}
+	mutex_unlock(&netcp_modules_lock);
+	return 0;
+
+probe_quit_interface:
+	list_for_each_entry_safe(netcp_intf, netcp_tmp,
+				 &netcp_device->interface_head,
+				 interface_list) {
+		netcp_delete_interface(netcp_device, netcp_intf->ndev);
+	}
+
+	of_node_put(interfaces);
+
+probe_quit:
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int netcp_remove(struct platform_device *pdev)
+{
+	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+	struct netcp_intf *netcp_intf, *netcp_tmp;
+	struct netcp_inst_modpriv *inst_modpriv, *tmp;
+	struct netcp_module *module;
+
+	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
+				 inst_list) {
+		module = inst_modpriv->netcp_module;
+		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
+		module->remove(netcp_device, inst_modpriv->module_priv);
+		list_del(&inst_modpriv->inst_list);
+	}
+
+	/* now that all modules are removed, clean up the interfaces */
+	list_for_each_entry_safe(netcp_intf, netcp_tmp,
+				 &netcp_device->interface_head,
+				 interface_list) {
+		netcp_delete_interface(netcp_device, netcp_intf->ndev);
+	}
+
+	WARN(!list_empty(&netcp_device->interface_head),
+	     "%s interface list not empty!\n", pdev->name);
+
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id of_match[] = {
+	{ .compatible = "ti,netcp-1.0", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver netcp_driver = {
+	.driver = {
+		.name		= "netcp-1.0",
+		.of_match_table	= of_match,
+	},
+	.probe = netcp_probe,
+	.remove = netcp_remove,
+};
+module_platform_driver(netcp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
new file mode 100644
index 0000000..72b98e2
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -0,0 +1,3881 @@
+/*
+ * Keystone GBE and XGBE subsystem code
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:	Sandeep Nair <sandeep_n@ti.com>
+ *		Sandeep Paulraj <s-paulraj@ti.com>
+ *		Cyril Chemparathy <cyril@ti.com>
+ *		Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *		Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "netcp.h"
+#include "cpts.h"
+
+#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
+#define NETCP_DRIVER_VERSION		"v1.0"
+
+#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
+#define GBE_MAJOR_VERSION(reg)		((reg >> 8) & 0x7)
+#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
+#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
+
+/* 1G Ethernet SS defines */
+#define GBE_MODULE_NAME			"netcp-gbe"
+#define GBE_SS_VERSION_14		0x4ed2
+
+#define GBE_SS_REG_INDEX		0
+#define GBE_SGMII34_REG_INDEX		1
+#define GBE_SM_REG_INDEX		2
+/* offset relative to base of GBE_SS_REG_INDEX */
+#define GBE13_SGMII_MODULE_OFFSET	0x100
+/* offset relative to base of GBE_SM_REG_INDEX */
+#define GBE13_HOST_PORT_OFFSET		0x34
+#define GBE13_SLAVE_PORT_OFFSET		0x60
+#define GBE13_EMAC_OFFSET		0x100
+#define GBE13_SLAVE_PORT2_OFFSET	0x200
+#define GBE13_HW_STATS_OFFSET		0x300
+#define GBE13_CPTS_OFFSET		0x500
+#define GBE13_ALE_OFFSET		0x600
+#define GBE13_HOST_PORT_NUM		0
+#define GBE13_NUM_ALE_ENTRIES		1024
+
+/* 1G Ethernet NU SS defines */
+#define GBENU_MODULE_NAME		"netcp-gbenu"
+#define GBE_SS_ID_NU			0x4ee6
+#define GBE_SS_ID_2U			0x4ee8
+
+#define IS_SS_ID_MU(d) \
+	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
+	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
+
+#define IS_SS_ID_NU(d) \
+	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
+
+#define IS_SS_ID_VER_14(d) \
+	(GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
+#define IS_SS_ID_2U(d) \
+	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
+
+#define GBENU_SS_REG_INDEX		0
+#define GBENU_SM_REG_INDEX		1
+#define GBENU_SGMII_MODULE_OFFSET	0x100
+#define GBENU_HOST_PORT_OFFSET		0x1000
+#define GBENU_SLAVE_PORT_OFFSET		0x2000
+#define GBENU_EMAC_OFFSET		0x2330
+#define GBENU_HW_STATS_OFFSET		0x1a000
+#define GBENU_CPTS_OFFSET		0x1d000
+#define GBENU_ALE_OFFSET		0x1e000
+#define GBENU_HOST_PORT_NUM		0
+#define GBENU_SGMII_MODULE_SIZE		0x100
+
+/* 10G Ethernet SS defines */
+#define XGBE_MODULE_NAME		"netcp-xgbe"
+#define XGBE_SS_VERSION_10		0x4ee4
+
+#define XGBE_SS_REG_INDEX		0
+#define XGBE_SM_REG_INDEX		1
+#define XGBE_SERDES_REG_INDEX		2
+
+/* offset relative to base of XGBE_SS_REG_INDEX */
+#define XGBE10_SGMII_MODULE_OFFSET	0x100
+#define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
+/* offset relative to base of XGBE_SM_REG_INDEX */
+#define XGBE10_HOST_PORT_OFFSET		0x34
+#define XGBE10_SLAVE_PORT_OFFSET	0x64
+#define XGBE10_EMAC_OFFSET		0x400
+#define XGBE10_CPTS_OFFSET		0x600
+#define XGBE10_ALE_OFFSET		0x700
+#define XGBE10_HW_STATS_OFFSET		0x800
+#define XGBE10_HOST_PORT_NUM		0
+#define XGBE10_NUM_ALE_ENTRIES		2048
+
+#define	GBE_TIMER_INTERVAL			(HZ / 2)
+
+/* Soft reset register values */
+#define SOFT_RESET_MASK				BIT(0)
+#define SOFT_RESET				BIT(0)
+#define DEVICE_EMACSL_RESET_POLL_COUNT		100
+#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
+
+#define MACSL_RX_ENABLE_CSF			BIT(23)
+#define MACSL_ENABLE_EXT_CTL			BIT(18)
+#define MACSL_XGMII_ENABLE			BIT(13)
+#define MACSL_XGIG_MODE				BIT(8)
+#define MACSL_GIG_MODE				BIT(7)
+#define MACSL_GMII_ENABLE			BIT(5)
+#define MACSL_FULLDUPLEX			BIT(0)
+
+#define GBE_CTL_P0_ENABLE			BIT(2)
+#define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
+#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
+#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
+#define GBE_STATS_CD_SEL			BIT(28)
+
+#define GBE_PORT_MASK(x)			(BIT(x) - 1)
+#define GBE_MASK_NO_PORTS			0
+
+#define GBE_DEF_1G_MAC_CONTROL					\
+		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
+		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
+
+#define GBE_DEF_10G_MAC_CONTROL				\
+		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
+		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
+
+#define GBE_STATSA_MODULE			0
+#define GBE_STATSB_MODULE			1
+#define GBE_STATSC_MODULE			2
+#define GBE_STATSD_MODULE			3
+
+#define GBENU_STATS0_MODULE			0
+#define GBENU_STATS1_MODULE			1
+#define GBENU_STATS2_MODULE			2
+#define GBENU_STATS3_MODULE			3
+#define GBENU_STATS4_MODULE			4
+#define GBENU_STATS5_MODULE			5
+#define GBENU_STATS6_MODULE			6
+#define GBENU_STATS7_MODULE			7
+#define GBENU_STATS8_MODULE			8
+
+#define XGBE_STATS0_MODULE			0
+#define XGBE_STATS1_MODULE			1
+#define XGBE_STATS2_MODULE			2
+
+/* s: 0-based slave_port */
+#define SGMII_BASE(d, s) \
+	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
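+/* slaves 0 and 1 sit in the first SGMII module, 2 and 3 in port34's */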
+
+#define GBE_TX_QUEUE				648
+#define	GBE_TXHOOK_ORDER			0
+#define	GBE_RXHOOK_ORDER			0
+#define GBE_DEFAULT_ALE_AGEOUT			30
+#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
+#define SLAVE_LINK_IS_RGMII(s) \
+	(((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
+	 ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
+#define SLAVE_LINK_IS_SGMII(s) \
+	((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
+#define NETCP_LINK_STATE_INVALID		-1
+
+#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+		offsetof(struct gbe##_##rb, rn)
+#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+		offsetof(struct gbenu##_##rb, rn)
+#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+		offsetof(struct xgbe##_##rb, rn)
+#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
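+
+/*
+ * Editor's sketch (not in the original driver): the *_SET_REG_OFS
+ * macros record, per subsystem flavour, where each register lives
+ * inside its block, and GBE_REG_ADDR adds that offset back onto the
+ * mapped base.  Expanded by hand for one register:
+ *
+ *	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ *	  => gbe_dev->switch_regs_ofs.stat_port_en =
+ *		offsetof(struct gbe_switch_regs, stat_port_en);
+ *
+ *	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ *	  => writel(val, gbe_dev->switch_regs +
+ *		     gbe_dev->switch_regs_ofs.stat_port_en);
+ */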
+
+#define HOST_TX_PRI_MAP_DEFAULT			0x00000000
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+/* Px_TS_CTL register fields */
+#define TS_RX_ANX_F_EN				BIT(0)
+#define TS_RX_VLAN_LT1_EN			BIT(1)
+#define TS_RX_VLAN_LT2_EN			BIT(2)
+#define TS_RX_ANX_D_EN				BIT(3)
+#define TS_TX_ANX_F_EN				BIT(4)
+#define TS_TX_VLAN_LT1_EN			BIT(5)
+#define TS_TX_VLAN_LT2_EN			BIT(6)
+#define TS_TX_ANX_D_EN				BIT(7)
+#define TS_LT2_EN				BIT(8)
+#define TS_RX_ANX_E_EN				BIT(9)
+#define TS_TX_ANX_E_EN				BIT(10)
+#define TS_MSG_TYPE_EN_SHIFT			16
+#define TS_MSG_TYPE_EN_MASK			0xffff
+
+/* Px_TS_SEQ_LTYPE register fields */
+#define TS_SEQ_ID_OFS_SHIFT			16
+#define TS_SEQ_ID_OFS_MASK			0x3f
+
+/* Px_TS_CTL_LTYPE2 register fields */
+#define TS_107					BIT(16)
+#define TS_129					BIT(17)
+#define TS_130					BIT(18)
+#define TS_131					BIT(19)
+#define TS_132					BIT(20)
+#define TS_319					BIT(21)
+#define TS_320					BIT(22)
+#define TS_TTL_NONZERO				BIT(23)
+#define TS_UNI_EN				BIT(24)
+#define TS_UNI_EN_SHIFT				24
+
+#define TS_TX_ANX_ALL_EN	 \
+	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
+
+#define TS_RX_ANX_ALL_EN	 \
+	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
+
+#define TS_CTL_DST_PORT				TS_319
+#define TS_CTL_DST_PORT_SHIFT			21
+
+#define TS_CTL_MADDR_ALL	\
+	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
+
+#define TS_CTL_MADDR_SHIFT			16
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
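+
+/*
+ * Editor's sketch (illustrative, not from the driver): composing a
+ * Px_TS_CTL value that timestamps PTP event messages in both
+ * directions over the Annex D/E/F transports:
+ *
+ *	u32 ts_ctl = TS_TX_ANX_ALL_EN | TS_RX_ANX_ALL_EN |
+ *		     (EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT);
+ */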
+#endif /* CONFIG_TI_CPTS */
+
+struct xgbe_ss_regs {
+	u32	id_ver;
+	u32	synce_count;
+	u32	synce_mux;
+	u32	control;
+};
+
+struct xgbe_switch_regs {
+	u32	id_ver;
+	u32	control;
+	u32	emcontrol;
+	u32	stat_port_en;
+	u32	ptype;
+	u32	soft_idle;
+	u32	thru_rate;
+	u32	gap_thresh;
+	u32	tx_start_wds;
+	u32	flow_control;
+	u32	cppi_thresh;
+};
+
+struct xgbe_port_regs {
+	u32	blk_cnt;
+	u32	port_vlan;
+	u32	tx_pri_map;
+	u32	sa_lo;
+	u32	sa_hi;
+	u32	ts_ctl;
+	u32	ts_seq_ltype;
+	u32	ts_vlan;
+	u32	ts_ctl_ltype2;
+	u32	ts_ctl2;
+	u32	control;
+};
+
+struct xgbe_host_port_regs {
+	u32	blk_cnt;
+	u32	port_vlan;
+	u32	tx_pri_map;
+	u32	src_id;
+	u32	rx_pri_map;
+	u32	rx_maxlen;
+};
+
+struct xgbe_emac_regs {
+	u32	id_ver;
+	u32	mac_control;
+	u32	mac_status;
+	u32	soft_reset;
+	u32	rx_maxlen;
+	u32	__reserved_0;
+	u32	rx_pause;
+	u32	tx_pause;
+	u32	em_control;
+	u32	__reserved_1;
+	u32	tx_gap;
+	u32	rsvd[4];
+};
+
+struct xgbe_host_hw_stats {
+	u32	rx_good_frames;
+	u32	rx_broadcast_frames;
+	u32	rx_multicast_frames;
+	u32	__rsvd_0[3];
+	u32	rx_oversized_frames;
+	u32	__rsvd_1;
+	u32	rx_undersized_frames;
+	u32	__rsvd_2;
+	u32	overrun_type4;
+	u32	overrun_type5;
+	u32	rx_bytes;
+	u32	tx_good_frames;
+	u32	tx_broadcast_frames;
+	u32	tx_multicast_frames;
+	u32	__rsvd_3[9];
+	u32	tx_bytes;
+	u32	tx_64byte_frames;
+	u32	tx_65_to_127byte_frames;
+	u32	tx_128_to_255byte_frames;
+	u32	tx_256_to_511byte_frames;
+	u32	tx_512_to_1023byte_frames;
+	u32	tx_1024byte_frames;
+	u32	net_bytes;
+	u32	rx_sof_overruns;
+	u32	rx_mof_overruns;
+	u32	rx_dma_overruns;
+};
+
+struct xgbe_hw_stats {
+	u32	rx_good_frames;
+	u32	rx_broadcast_frames;
+	u32	rx_multicast_frames;
+	u32	rx_pause_frames;
+	u32	rx_crc_errors;
+	u32	rx_align_code_errors;
+	u32	rx_oversized_frames;
+	u32	rx_jabber_frames;
+	u32	rx_undersized_frames;
+	u32	rx_fragments;
+	u32	overrun_type4;
+	u32	overrun_type5;
+	u32	rx_bytes;
+	u32	tx_good_frames;
+	u32	tx_broadcast_frames;
+	u32	tx_multicast_frames;
+	u32	tx_pause_frames;
+	u32	tx_deferred_frames;
+	u32	tx_collision_frames;
+	u32	tx_single_coll_frames;
+	u32	tx_mult_coll_frames;
+	u32	tx_excessive_collisions;
+	u32	tx_late_collisions;
+	u32	tx_underrun;
+	u32	tx_carrier_sense_errors;
+	u32	tx_bytes;
+	u32	tx_64byte_frames;
+	u32	tx_65_to_127byte_frames;
+	u32	tx_128_to_255byte_frames;
+	u32	tx_256_to_511byte_frames;
+	u32	tx_512_to_1023byte_frames;
+	u32	tx_1024byte_frames;
+	u32	net_bytes;
+	u32	rx_sof_overruns;
+	u32	rx_mof_overruns;
+	u32	rx_dma_overruns;
+};
+
+struct gbenu_ss_regs {
+	u32	id_ver;
+	u32	synce_count;		/* NU */
+	u32	synce_mux;		/* NU */
+	u32	control;		/* 2U */
+	u32	__rsvd_0[2];		/* 2U */
+	u32	rgmii_status;		/* 2U */
+	u32	ss_status;		/* 2U */
+};
+
+struct gbenu_switch_regs {
+	u32	id_ver;
+	u32	control;
+	u32	__rsvd_0[2];
+	u32	emcontrol;
+	u32	stat_port_en;
+	u32	ptype;			/* NU */
+	u32	soft_idle;
+	u32	thru_rate;		/* NU */
+	u32	gap_thresh;		/* NU */
+	u32	tx_start_wds;		/* NU */
+	u32	eee_prescale;		/* 2U */
+	u32	tx_g_oflow_thresh_set;	/* NU */
+	u32	tx_g_oflow_thresh_clr;	/* NU */
+	u32	tx_g_buf_thresh_set_l;	/* NU */
+	u32	tx_g_buf_thresh_set_h;	/* NU */
+	u32	tx_g_buf_thresh_clr_l;	/* NU */
+	u32	tx_g_buf_thresh_clr_h;	/* NU */
+};
+
+struct gbenu_port_regs {
+	u32	__rsvd_0;
+	u32	control;
+	u32	max_blks;		/* 2U */
+	u32	mem_align1;
+	u32	blk_cnt;
+	u32	port_vlan;
+	u32	tx_pri_map;		/* NU */
+	u32	pri_ctl;		/* 2U */
+	u32	rx_pri_map;
+	u32	rx_maxlen;
+	u32	tx_blks_pri;		/* NU */
+	u32	__rsvd_1;
+	u32	idle2lpi;		/* 2U */
+	u32	lpi2idle;		/* 2U */
+	u32	eee_status;		/* 2U */
+	u32	__rsvd_2;
+	u32	__rsvd_3[176];		/* NU: more to add */
+	u32	__rsvd_4[2];
+	u32	sa_lo;
+	u32	sa_hi;
+	u32	ts_ctl;
+	u32	ts_seq_ltype;
+	u32	ts_vlan;
+	u32	ts_ctl_ltype2;
+	u32	ts_ctl2;
+};
+
+struct gbenu_host_port_regs {
+	u32	__rsvd_0;
+	u32	control;
+	u32	flow_id_offset;		/* 2U */
+	u32	__rsvd_1;
+	u32	blk_cnt;
+	u32	port_vlan;
+	u32	tx_pri_map;		/* NU */
+	u32	pri_ctl;
+	u32	rx_pri_map;
+	u32	rx_maxlen;
+	u32	tx_blks_pri;		/* NU */
+	u32	__rsvd_2;
+	u32	idle2lpi;		/* 2U */
+	u32	lpi2wake;		/* 2U */
+	u32	eee_status;		/* 2U */
+	u32	__rsvd_3;
+	u32	__rsvd_4[184];		/* NU */
+	u32	host_blks_pri;		/* NU */
+};
+
+struct gbenu_emac_regs {
+	u32	mac_control;
+	u32	mac_status;
+	u32	soft_reset;
+	u32	boff_test;
+	u32	rx_pause;
+	u32	__rsvd_0[11];		/* NU */
+	u32	tx_pause;
+	u32	__rsvd_1[11];		/* NU */
+	u32	em_control;
+	u32	tx_gap;
+};
+
+/* Some hw stat regs are applicable to the slave ports only; this is
+ * handled via the gbenu_et_stats struct below.  Also, some stats are
+ * specific to SS version NU while others apply only to 2U.
+ */
+struct gbenu_hw_stats {
+	u32	rx_good_frames;
+	u32	rx_broadcast_frames;
+	u32	rx_multicast_frames;
+	u32	rx_pause_frames;		/* slave */
+	u32	rx_crc_errors;
+	u32	rx_align_code_errors;		/* slave */
+	u32	rx_oversized_frames;
+	u32	rx_jabber_frames;		/* slave */
+	u32	rx_undersized_frames;
+	u32	rx_fragments;			/* slave */
+	u32	ale_drop;
+	u32	ale_overrun_drop;
+	u32	rx_bytes;
+	u32	tx_good_frames;
+	u32	tx_broadcast_frames;
+	u32	tx_multicast_frames;
+	u32	tx_pause_frames;		/* slave */
+	u32	tx_deferred_frames;		/* slave */
+	u32	tx_collision_frames;		/* slave */
+	u32	tx_single_coll_frames;		/* slave */
+	u32	tx_mult_coll_frames;		/* slave */
+	u32	tx_excessive_collisions;	/* slave */
+	u32	tx_late_collisions;		/* slave */
+	u32	rx_ipg_error;			/* slave 10G only */
+	u32	tx_carrier_sense_errors;	/* slave */
+	u32	tx_bytes;
+	u32	tx_64B_frames;
+	u32	tx_65_to_127B_frames;
+	u32	tx_128_to_255B_frames;
+	u32	tx_256_to_511B_frames;
+	u32	tx_512_to_1023B_frames;
+	u32	tx_1024B_frames;
+	u32	net_bytes;
+	u32	rx_bottom_fifo_drop;
+	u32	rx_port_mask_drop;
+	u32	rx_top_fifo_drop;
+	u32	ale_rate_limit_drop;
+	u32	ale_vid_ingress_drop;
+	u32	ale_da_eq_sa_drop;
+	u32	__rsvd_0[3];
+	u32	ale_unknown_ucast;
+	u32	ale_unknown_ucast_bytes;
+	u32	ale_unknown_mcast;
+	u32	ale_unknown_mcast_bytes;
+	u32	ale_unknown_bcast;
+	u32	ale_unknown_bcast_bytes;
+	u32	ale_pol_match;
+	u32	ale_pol_match_red;		/* NU */
+	u32	ale_pol_match_yellow;		/* NU */
+	u32	__rsvd_1[44];
+	u32	tx_mem_protect_err;
+	/* following NU only */
+	u32	tx_pri0;
+	u32	tx_pri1;
+	u32	tx_pri2;
+	u32	tx_pri3;
+	u32	tx_pri4;
+	u32	tx_pri5;
+	u32	tx_pri6;
+	u32	tx_pri7;
+	u32	tx_pri0_bcnt;
+	u32	tx_pri1_bcnt;
+	u32	tx_pri2_bcnt;
+	u32	tx_pri3_bcnt;
+	u32	tx_pri4_bcnt;
+	u32	tx_pri5_bcnt;
+	u32	tx_pri6_bcnt;
+	u32	tx_pri7_bcnt;
+	u32	tx_pri0_drop;
+	u32	tx_pri1_drop;
+	u32	tx_pri2_drop;
+	u32	tx_pri3_drop;
+	u32	tx_pri4_drop;
+	u32	tx_pri5_drop;
+	u32	tx_pri6_drop;
+	u32	tx_pri7_drop;
+	u32	tx_pri0_drop_bcnt;
+	u32	tx_pri1_drop_bcnt;
+	u32	tx_pri2_drop_bcnt;
+	u32	tx_pri3_drop_bcnt;
+	u32	tx_pri4_drop_bcnt;
+	u32	tx_pri5_drop_bcnt;
+	u32	tx_pri6_drop_bcnt;
+	u32	tx_pri7_drop_bcnt;
+};
+
+#define GBENU_HW_STATS_REG_MAP_SZ	0x200
+
+struct gbe_ss_regs {
+	u32	id_ver;
+	u32	synce_count;
+	u32	synce_mux;
+};
+
+struct gbe_ss_regs_ofs {
+	u16	id_ver;
+	u16	control;
+	u16	rgmii_status; /* 2U */
+};
+
+struct gbe_switch_regs {
+	u32	id_ver;
+	u32	control;
+	u32	soft_reset;
+	u32	stat_port_en;
+	u32	ptype;
+	u32	soft_idle;
+	u32	thru_rate;
+	u32	gap_thresh;
+	u32	tx_start_wds;
+	u32	flow_control;
+};
+
+struct gbe_switch_regs_ofs {
+	u16	id_ver;
+	u16	control;
+	u16	soft_reset;
+	u16	emcontrol;
+	u16	stat_port_en;
+	u16	ptype;
+	u16	flow_control;
+};
+
+struct gbe_port_regs {
+	u32	max_blks;
+	u32	blk_cnt;
+	u32	port_vlan;
+	u32	tx_pri_map;
+	u32	sa_lo;
+	u32	sa_hi;
+	u32	ts_ctl;
+	u32	ts_seq_ltype;
+	u32	ts_vlan;
+	u32	ts_ctl_ltype2;
+	u32	ts_ctl2;
+};
+
+struct gbe_port_regs_ofs {
+	u16	port_vlan;
+	u16	tx_pri_map;
+	u16     rx_pri_map;
+	u16	sa_lo;
+	u16	sa_hi;
+	u16	ts_ctl;
+	u16	ts_seq_ltype;
+	u16	ts_vlan;
+	u16	ts_ctl_ltype2;
+	u16	ts_ctl2;
+	u16	rx_maxlen;	/* 2U, NU */
+};
+
+struct gbe_host_port_regs {
+	u32	src_id;
+	u32	port_vlan;
+	u32	rx_pri_map;
+	u32	rx_maxlen;
+};
+
+struct gbe_host_port_regs_ofs {
+	u16	port_vlan;
+	u16	tx_pri_map;
+	u16	rx_maxlen;
+};
+
+struct gbe_emac_regs {
+	u32	id_ver;
+	u32	mac_control;
+	u32	mac_status;
+	u32	soft_reset;
+	u32	rx_maxlen;
+	u32	__reserved_0;
+	u32	rx_pause;
+	u32	tx_pause;
+	u32	__reserved_1;
+	u32	rx_pri_map;
+	u32	rsvd[6];
+};
+
+struct gbe_emac_regs_ofs {
+	u16	mac_control;
+	u16	soft_reset;
+	u16	rx_maxlen;
+};
+
+struct gbe_hw_stats {
+	u32	rx_good_frames;
+	u32	rx_broadcast_frames;
+	u32	rx_multicast_frames;
+	u32	rx_pause_frames;
+	u32	rx_crc_errors;
+	u32	rx_align_code_errors;
+	u32	rx_oversized_frames;
+	u32	rx_jabber_frames;
+	u32	rx_undersized_frames;
+	u32	rx_fragments;
+	u32	__pad_0[2];
+	u32	rx_bytes;
+	u32	tx_good_frames;
+	u32	tx_broadcast_frames;
+	u32	tx_multicast_frames;
+	u32	tx_pause_frames;
+	u32	tx_deferred_frames;
+	u32	tx_collision_frames;
+	u32	tx_single_coll_frames;
+	u32	tx_mult_coll_frames;
+	u32	tx_excessive_collisions;
+	u32	tx_late_collisions;
+	u32	tx_underrun;
+	u32	tx_carrier_sense_errors;
+	u32	tx_bytes;
+	u32	tx_64byte_frames;
+	u32	tx_65_to_127byte_frames;
+	u32	tx_128_to_255byte_frames;
+	u32	tx_256_to_511byte_frames;
+	u32	tx_512_to_1023byte_frames;
+	u32	tx_1024byte_frames;
+	u32	net_bytes;
+	u32	rx_sof_overruns;
+	u32	rx_mof_overruns;
+	u32	rx_dma_overruns;
+};
+
+#define GBE_MAX_HW_STAT_MODS			9
+#define GBE_HW_STATS_REG_MAP_SZ			0x100
+
+struct ts_ctl {
+	int     uni;
+	u8      dst_port_map;
+	u8      maddr_map;
+	u8      ts_mcast_type;
+};
+
+struct gbe_slave {
+	void __iomem			*port_regs;
+	void __iomem			*emac_regs;
+	struct gbe_port_regs_ofs	port_regs_ofs;
+	struct gbe_emac_regs_ofs	emac_regs_ofs;
+	int				slave_num; /* 0-based logical number */
+	int				port_num;  /* actual port number */
+	atomic_t			link_state;
+	bool				open;
+	struct phy_device		*phy;
+	u32				link_interface;
+	u32				mac_control;
+	u8				phy_port_t;
+	struct device_node		*node;
+	struct device_node		*phy_node;
+	struct ts_ctl                   ts_ctl;
+	struct list_head		slave_list;
+};
+
+struct gbe_priv {
+	struct device			*dev;
+	struct netcp_device		*netcp_device;
+	struct timer_list		timer;
+	u32				num_slaves;
+	u32				ale_entries;
+	u32				ale_ports;
+	bool				enable_ale;
+	u8				max_num_slaves;
+	u8				max_num_ports; /* max_num_slaves + 1 */
+	u8				num_stats_mods;
+	struct netcp_tx_pipe		tx_pipe;
+
+	int				host_port;
+	u32				rx_packet_max;
+	u32				ss_version;
+	u32				stats_en_mask;
+
+	void __iomem			*ss_regs;
+	void __iomem			*switch_regs;
+	void __iomem			*host_port_regs;
+	void __iomem			*ale_reg;
+	void __iomem                    *cpts_reg;
+	void __iomem			*sgmii_port_regs;
+	void __iomem			*sgmii_port34_regs;
+	void __iomem			*xgbe_serdes_regs;
+	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
+
+	struct gbe_ss_regs_ofs		ss_regs_ofs;
+	struct gbe_switch_regs_ofs	switch_regs_ofs;
+	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
+
+	struct cpsw_ale			*ale;
+	unsigned int			tx_queue_id;
+	const char			*dma_chan_name;
+
+	struct list_head		gbe_intf_head;
+	struct list_head		secondary_slaves;
+	struct net_device		*dummy_ndev;
+
+	u64				*hw_stats;
+	u32				*hw_stats_prev;
+	const struct netcp_ethtool_stat *et_stats;
+	int				num_et_stats;
+	/* Lock for updating the hw stats */
+	spinlock_t			hw_stats_lock;
+
+	int                             cpts_registered;
+	struct cpts                     *cpts;
+};
+
+struct gbe_intf {
+	struct net_device	*ndev;
+	struct device		*dev;
+	struct gbe_priv		*gbe_dev;
+	struct netcp_tx_pipe	tx_pipe;
+	struct gbe_slave	*slave;
+	struct list_head	gbe_intf_list;
+	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+};
+
+static struct netcp_module gbe_module;
+static struct netcp_module xgbe_module;
+
+/* Statistic management */
+struct netcp_ethtool_stat {
+	char desc[ETH_GSTRING_LEN];
+	int type;
+	u32 size;
+	int offset;
+};
+
+#define GBE_STATSA_INFO(field)						\
+{									\
+	"GBE_A:"#field, GBE_STATSA_MODULE,				\
+	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
+	offsetof(struct gbe_hw_stats, field)				\
+}
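+
+/*
+ * Editor's note: each table entry pairs an ethtool string with the
+ * stats module it belongs to, plus the size and offset of the counter
+ * inside that module's register layout, e.g. (expanded by hand):
+ *
+ *	GBE_STATSA_INFO(rx_good_frames)
+ *	  => { "GBE_A:rx_good_frames", GBE_STATSA_MODULE, 4,
+ *	       offsetof(struct gbe_hw_stats, rx_good_frames) }
+ */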
+
+#define GBE_STATSB_INFO(field)						\
+{									\
+	"GBE_B:"#field, GBE_STATSB_MODULE,				\
+	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
+	offsetof(struct gbe_hw_stats, field)				\
+}
+
+#define GBE_STATSC_INFO(field)						\
+{									\
+	"GBE_C:"#field, GBE_STATSC_MODULE,				\
+	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
+	offsetof(struct gbe_hw_stats, field)				\
+}
+
+#define GBE_STATSD_INFO(field)						\
+{									\
+	"GBE_D:"#field, GBE_STATSD_MODULE,				\
+	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
+	offsetof(struct gbe_hw_stats, field)				\
+}
+
+static const struct netcp_ethtool_stat gbe13_et_stats[] = {
+	/* GBE module A */
+	GBE_STATSA_INFO(rx_good_frames),
+	GBE_STATSA_INFO(rx_broadcast_frames),
+	GBE_STATSA_INFO(rx_multicast_frames),
+	GBE_STATSA_INFO(rx_pause_frames),
+	GBE_STATSA_INFO(rx_crc_errors),
+	GBE_STATSA_INFO(rx_align_code_errors),
+	GBE_STATSA_INFO(rx_oversized_frames),
+	GBE_STATSA_INFO(rx_jabber_frames),
+	GBE_STATSA_INFO(rx_undersized_frames),
+	GBE_STATSA_INFO(rx_fragments),
+	GBE_STATSA_INFO(rx_bytes),
+	GBE_STATSA_INFO(tx_good_frames),
+	GBE_STATSA_INFO(tx_broadcast_frames),
+	GBE_STATSA_INFO(tx_multicast_frames),
+	GBE_STATSA_INFO(tx_pause_frames),
+	GBE_STATSA_INFO(tx_deferred_frames),
+	GBE_STATSA_INFO(tx_collision_frames),
+	GBE_STATSA_INFO(tx_single_coll_frames),
+	GBE_STATSA_INFO(tx_mult_coll_frames),
+	GBE_STATSA_INFO(tx_excessive_collisions),
+	GBE_STATSA_INFO(tx_late_collisions),
+	GBE_STATSA_INFO(tx_underrun),
+	GBE_STATSA_INFO(tx_carrier_sense_errors),
+	GBE_STATSA_INFO(tx_bytes),
+	GBE_STATSA_INFO(tx_64byte_frames),
+	GBE_STATSA_INFO(tx_65_to_127byte_frames),
+	GBE_STATSA_INFO(tx_128_to_255byte_frames),
+	GBE_STATSA_INFO(tx_256_to_511byte_frames),
+	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
+	GBE_STATSA_INFO(tx_1024byte_frames),
+	GBE_STATSA_INFO(net_bytes),
+	GBE_STATSA_INFO(rx_sof_overruns),
+	GBE_STATSA_INFO(rx_mof_overruns),
+	GBE_STATSA_INFO(rx_dma_overruns),
+	/* GBE module B */
+	GBE_STATSB_INFO(rx_good_frames),
+	GBE_STATSB_INFO(rx_broadcast_frames),
+	GBE_STATSB_INFO(rx_multicast_frames),
+	GBE_STATSB_INFO(rx_pause_frames),
+	GBE_STATSB_INFO(rx_crc_errors),
+	GBE_STATSB_INFO(rx_align_code_errors),
+	GBE_STATSB_INFO(rx_oversized_frames),
+	GBE_STATSB_INFO(rx_jabber_frames),
+	GBE_STATSB_INFO(rx_undersized_frames),
+	GBE_STATSB_INFO(rx_fragments),
+	GBE_STATSB_INFO(rx_bytes),
+	GBE_STATSB_INFO(tx_good_frames),
+	GBE_STATSB_INFO(tx_broadcast_frames),
+	GBE_STATSB_INFO(tx_multicast_frames),
+	GBE_STATSB_INFO(tx_pause_frames),
+	GBE_STATSB_INFO(tx_deferred_frames),
+	GBE_STATSB_INFO(tx_collision_frames),
+	GBE_STATSB_INFO(tx_single_coll_frames),
+	GBE_STATSB_INFO(tx_mult_coll_frames),
+	GBE_STATSB_INFO(tx_excessive_collisions),
+	GBE_STATSB_INFO(tx_late_collisions),
+	GBE_STATSB_INFO(tx_underrun),
+	GBE_STATSB_INFO(tx_carrier_sense_errors),
+	GBE_STATSB_INFO(tx_bytes),
+	GBE_STATSB_INFO(tx_64byte_frames),
+	GBE_STATSB_INFO(tx_65_to_127byte_frames),
+	GBE_STATSB_INFO(tx_128_to_255byte_frames),
+	GBE_STATSB_INFO(tx_256_to_511byte_frames),
+	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
+	GBE_STATSB_INFO(tx_1024byte_frames),
+	GBE_STATSB_INFO(net_bytes),
+	GBE_STATSB_INFO(rx_sof_overruns),
+	GBE_STATSB_INFO(rx_mof_overruns),
+	GBE_STATSB_INFO(rx_dma_overruns),
+	/* GBE module C */
+	GBE_STATSC_INFO(rx_good_frames),
+	GBE_STATSC_INFO(rx_broadcast_frames),
+	GBE_STATSC_INFO(rx_multicast_frames),
+	GBE_STATSC_INFO(rx_pause_frames),
+	GBE_STATSC_INFO(rx_crc_errors),
+	GBE_STATSC_INFO(rx_align_code_errors),
+	GBE_STATSC_INFO(rx_oversized_frames),
+	GBE_STATSC_INFO(rx_jabber_frames),
+	GBE_STATSC_INFO(rx_undersized_frames),
+	GBE_STATSC_INFO(rx_fragments),
+	GBE_STATSC_INFO(rx_bytes),
+	GBE_STATSC_INFO(tx_good_frames),
+	GBE_STATSC_INFO(tx_broadcast_frames),
+	GBE_STATSC_INFO(tx_multicast_frames),
+	GBE_STATSC_INFO(tx_pause_frames),
+	GBE_STATSC_INFO(tx_deferred_frames),
+	GBE_STATSC_INFO(tx_collision_frames),
+	GBE_STATSC_INFO(tx_single_coll_frames),
+	GBE_STATSC_INFO(tx_mult_coll_frames),
+	GBE_STATSC_INFO(tx_excessive_collisions),
+	GBE_STATSC_INFO(tx_late_collisions),
+	GBE_STATSC_INFO(tx_underrun),
+	GBE_STATSC_INFO(tx_carrier_sense_errors),
+	GBE_STATSC_INFO(tx_bytes),
+	GBE_STATSC_INFO(tx_64byte_frames),
+	GBE_STATSC_INFO(tx_65_to_127byte_frames),
+	GBE_STATSC_INFO(tx_128_to_255byte_frames),
+	GBE_STATSC_INFO(tx_256_to_511byte_frames),
+	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
+	GBE_STATSC_INFO(tx_1024byte_frames),
+	GBE_STATSC_INFO(net_bytes),
+	GBE_STATSC_INFO(rx_sof_overruns),
+	GBE_STATSC_INFO(rx_mof_overruns),
+	GBE_STATSC_INFO(rx_dma_overruns),
+	/* GBE module D */
+	GBE_STATSD_INFO(rx_good_frames),
+	GBE_STATSD_INFO(rx_broadcast_frames),
+	GBE_STATSD_INFO(rx_multicast_frames),
+	GBE_STATSD_INFO(rx_pause_frames),
+	GBE_STATSD_INFO(rx_crc_errors),
+	GBE_STATSD_INFO(rx_align_code_errors),
+	GBE_STATSD_INFO(rx_oversized_frames),
+	GBE_STATSD_INFO(rx_jabber_frames),
+	GBE_STATSD_INFO(rx_undersized_frames),
+	GBE_STATSD_INFO(rx_fragments),
+	GBE_STATSD_INFO(rx_bytes),
+	GBE_STATSD_INFO(tx_good_frames),
+	GBE_STATSD_INFO(tx_broadcast_frames),
+	GBE_STATSD_INFO(tx_multicast_frames),
+	GBE_STATSD_INFO(tx_pause_frames),
+	GBE_STATSD_INFO(tx_deferred_frames),
+	GBE_STATSD_INFO(tx_collision_frames),
+	GBE_STATSD_INFO(tx_single_coll_frames),
+	GBE_STATSD_INFO(tx_mult_coll_frames),
+	GBE_STATSD_INFO(tx_excessive_collisions),
+	GBE_STATSD_INFO(tx_late_collisions),
+	GBE_STATSD_INFO(tx_underrun),
+	GBE_STATSD_INFO(tx_carrier_sense_errors),
+	GBE_STATSD_INFO(tx_bytes),
+	GBE_STATSD_INFO(tx_64byte_frames),
+	GBE_STATSD_INFO(tx_65_to_127byte_frames),
+	GBE_STATSD_INFO(tx_128_to_255byte_frames),
+	GBE_STATSD_INFO(tx_256_to_511byte_frames),
+	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
+	GBE_STATSD_INFO(tx_1024byte_frames),
+	GBE_STATSD_INFO(net_bytes),
+	GBE_STATSD_INFO(rx_sof_overruns),
+	GBE_STATSD_INFO(rx_mof_overruns),
+	GBE_STATSD_INFO(rx_dma_overruns),
+};
+
+/* Number of entries in the GBENU_STATS_HOST group below */
+#define GBENU_ET_STATS_HOST_SIZE	52
+
+#define GBENU_STATS_HOST(field)					\
+{								\
+	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+/* Number of entries in each per-port GBENU_STATS_Px group below */
+#define GBENU_ET_STATS_PORT_SIZE	65
+
+#define GBENU_STATS_P1(field)					\
+{								\
+	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P2(field)					\
+{								\
+	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P3(field)					\
+{								\
+	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P4(field)					\
+{								\
+	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P5(field)					\
+{								\
+	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P6(field)					\
+{								\
+	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P7(field)					\
+{								\
+	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+#define GBENU_STATS_P8(field)					\
+{								\
+	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
+	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
+	offsetof(struct gbenu_hw_stats, field)			\
+}
+
+static const struct netcp_ethtool_stat gbenu_et_stats[] = {
+	/* GBENU Host Module */
+	GBENU_STATS_HOST(rx_good_frames),
+	GBENU_STATS_HOST(rx_broadcast_frames),
+	GBENU_STATS_HOST(rx_multicast_frames),
+	GBENU_STATS_HOST(rx_crc_errors),
+	GBENU_STATS_HOST(rx_oversized_frames),
+	GBENU_STATS_HOST(rx_undersized_frames),
+	GBENU_STATS_HOST(ale_drop),
+	GBENU_STATS_HOST(ale_overrun_drop),
+	GBENU_STATS_HOST(rx_bytes),
+	GBENU_STATS_HOST(tx_good_frames),
+	GBENU_STATS_HOST(tx_broadcast_frames),
+	GBENU_STATS_HOST(tx_multicast_frames),
+	GBENU_STATS_HOST(tx_bytes),
+	GBENU_STATS_HOST(tx_64B_frames),
+	GBENU_STATS_HOST(tx_65_to_127B_frames),
+	GBENU_STATS_HOST(tx_128_to_255B_frames),
+	GBENU_STATS_HOST(tx_256_to_511B_frames),
+	GBENU_STATS_HOST(tx_512_to_1023B_frames),
+	GBENU_STATS_HOST(tx_1024B_frames),
+	GBENU_STATS_HOST(net_bytes),
+	GBENU_STATS_HOST(rx_bottom_fifo_drop),
+	GBENU_STATS_HOST(rx_port_mask_drop),
+	GBENU_STATS_HOST(rx_top_fifo_drop),
+	GBENU_STATS_HOST(ale_rate_limit_drop),
+	GBENU_STATS_HOST(ale_vid_ingress_drop),
+	GBENU_STATS_HOST(ale_da_eq_sa_drop),
+	GBENU_STATS_HOST(ale_unknown_ucast),
+	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
+	GBENU_STATS_HOST(ale_unknown_mcast),
+	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
+	GBENU_STATS_HOST(ale_unknown_bcast),
+	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+	GBENU_STATS_HOST(ale_pol_match),
+	GBENU_STATS_HOST(ale_pol_match_red),
+	GBENU_STATS_HOST(ale_pol_match_yellow),
+	GBENU_STATS_HOST(tx_mem_protect_err),
+	GBENU_STATS_HOST(tx_pri0_drop),
+	GBENU_STATS_HOST(tx_pri1_drop),
+	GBENU_STATS_HOST(tx_pri2_drop),
+	GBENU_STATS_HOST(tx_pri3_drop),
+	GBENU_STATS_HOST(tx_pri4_drop),
+	GBENU_STATS_HOST(tx_pri5_drop),
+	GBENU_STATS_HOST(tx_pri6_drop),
+	GBENU_STATS_HOST(tx_pri7_drop),
+	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
+	/* GBENU Module 1 */
+	GBENU_STATS_P1(rx_good_frames),
+	GBENU_STATS_P1(rx_broadcast_frames),
+	GBENU_STATS_P1(rx_multicast_frames),
+	GBENU_STATS_P1(rx_pause_frames),
+	GBENU_STATS_P1(rx_crc_errors),
+	GBENU_STATS_P1(rx_align_code_errors),
+	GBENU_STATS_P1(rx_oversized_frames),
+	GBENU_STATS_P1(rx_jabber_frames),
+	GBENU_STATS_P1(rx_undersized_frames),
+	GBENU_STATS_P1(rx_fragments),
+	GBENU_STATS_P1(ale_drop),
+	GBENU_STATS_P1(ale_overrun_drop),
+	GBENU_STATS_P1(rx_bytes),
+	GBENU_STATS_P1(tx_good_frames),
+	GBENU_STATS_P1(tx_broadcast_frames),
+	GBENU_STATS_P1(tx_multicast_frames),
+	GBENU_STATS_P1(tx_pause_frames),
+	GBENU_STATS_P1(tx_deferred_frames),
+	GBENU_STATS_P1(tx_collision_frames),
+	GBENU_STATS_P1(tx_single_coll_frames),
+	GBENU_STATS_P1(tx_mult_coll_frames),
+	GBENU_STATS_P1(tx_excessive_collisions),
+	GBENU_STATS_P1(tx_late_collisions),
+	GBENU_STATS_P1(rx_ipg_error),
+	GBENU_STATS_P1(tx_carrier_sense_errors),
+	GBENU_STATS_P1(tx_bytes),
+	GBENU_STATS_P1(tx_64B_frames),
+	GBENU_STATS_P1(tx_65_to_127B_frames),
+	GBENU_STATS_P1(tx_128_to_255B_frames),
+	GBENU_STATS_P1(tx_256_to_511B_frames),
+	GBENU_STATS_P1(tx_512_to_1023B_frames),
+	GBENU_STATS_P1(tx_1024B_frames),
+	GBENU_STATS_P1(net_bytes),
+	GBENU_STATS_P1(rx_bottom_fifo_drop),
+	GBENU_STATS_P1(rx_port_mask_drop),
+	GBENU_STATS_P1(rx_top_fifo_drop),
+	GBENU_STATS_P1(ale_rate_limit_drop),
+	GBENU_STATS_P1(ale_vid_ingress_drop),
+	GBENU_STATS_P1(ale_da_eq_sa_drop),
+	GBENU_STATS_P1(ale_unknown_ucast),
+	GBENU_STATS_P1(ale_unknown_ucast_bytes),
+	GBENU_STATS_P1(ale_unknown_mcast),
+	GBENU_STATS_P1(ale_unknown_mcast_bytes),
+	GBENU_STATS_P1(ale_unknown_bcast),
+	GBENU_STATS_P1(ale_unknown_bcast_bytes),
+	GBENU_STATS_P1(ale_pol_match),
+	GBENU_STATS_P1(ale_pol_match_red),
+	GBENU_STATS_P1(ale_pol_match_yellow),
+	GBENU_STATS_P1(tx_mem_protect_err),
+	GBENU_STATS_P1(tx_pri0_drop),
+	GBENU_STATS_P1(tx_pri1_drop),
+	GBENU_STATS_P1(tx_pri2_drop),
+	GBENU_STATS_P1(tx_pri3_drop),
+	GBENU_STATS_P1(tx_pri4_drop),
+	GBENU_STATS_P1(tx_pri5_drop),
+	GBENU_STATS_P1(tx_pri6_drop),
+	GBENU_STATS_P1(tx_pri7_drop),
+	GBENU_STATS_P1(tx_pri0_drop_bcnt),
+	GBENU_STATS_P1(tx_pri1_drop_bcnt),
+	GBENU_STATS_P1(tx_pri2_drop_bcnt),
+	GBENU_STATS_P1(tx_pri3_drop_bcnt),
+	GBENU_STATS_P1(tx_pri4_drop_bcnt),
+	GBENU_STATS_P1(tx_pri5_drop_bcnt),
+	GBENU_STATS_P1(tx_pri6_drop_bcnt),
+	GBENU_STATS_P1(tx_pri7_drop_bcnt),
+	/* GBENU Module 2 */
+	GBENU_STATS_P2(rx_good_frames),
+	GBENU_STATS_P2(rx_broadcast_frames),
+	GBENU_STATS_P2(rx_multicast_frames),
+	GBENU_STATS_P2(rx_pause_frames),
+	GBENU_STATS_P2(rx_crc_errors),
+	GBENU_STATS_P2(rx_align_code_errors),
+	GBENU_STATS_P2(rx_oversized_frames),
+	GBENU_STATS_P2(rx_jabber_frames),
+	GBENU_STATS_P2(rx_undersized_frames),
+	GBENU_STATS_P2(rx_fragments),
+	GBENU_STATS_P2(ale_drop),
+	GBENU_STATS_P2(ale_overrun_drop),
+	GBENU_STATS_P2(rx_bytes),
+	GBENU_STATS_P2(tx_good_frames),
+	GBENU_STATS_P2(tx_broadcast_frames),
+	GBENU_STATS_P2(tx_multicast_frames),
+	GBENU_STATS_P2(tx_pause_frames),
+	GBENU_STATS_P2(tx_deferred_frames),
+	GBENU_STATS_P2(tx_collision_frames),
+	GBENU_STATS_P2(tx_single_coll_frames),
+	GBENU_STATS_P2(tx_mult_coll_frames),
+	GBENU_STATS_P2(tx_excessive_collisions),
+	GBENU_STATS_P2(tx_late_collisions),
+	GBENU_STATS_P2(rx_ipg_error),
+	GBENU_STATS_P2(tx_carrier_sense_errors),
+	GBENU_STATS_P2(tx_bytes),
+	GBENU_STATS_P2(tx_64B_frames),
+	GBENU_STATS_P2(tx_65_to_127B_frames),
+	GBENU_STATS_P2(tx_128_to_255B_frames),
+	GBENU_STATS_P2(tx_256_to_511B_frames),
+	GBENU_STATS_P2(tx_512_to_1023B_frames),
+	GBENU_STATS_P2(tx_1024B_frames),
+	GBENU_STATS_P2(net_bytes),
+	GBENU_STATS_P2(rx_bottom_fifo_drop),
+	GBENU_STATS_P2(rx_port_mask_drop),
+	GBENU_STATS_P2(rx_top_fifo_drop),
+	GBENU_STATS_P2(ale_rate_limit_drop),
+	GBENU_STATS_P2(ale_vid_ingress_drop),
+	GBENU_STATS_P2(ale_da_eq_sa_drop),
+	GBENU_STATS_P2(ale_unknown_ucast),
+	GBENU_STATS_P2(ale_unknown_ucast_bytes),
+	GBENU_STATS_P2(ale_unknown_mcast),
+	GBENU_STATS_P2(ale_unknown_mcast_bytes),
+	GBENU_STATS_P2(ale_unknown_bcast),
+	GBENU_STATS_P2(ale_unknown_bcast_bytes),
+	GBENU_STATS_P2(ale_pol_match),
+	GBENU_STATS_P2(ale_pol_match_red),
+	GBENU_STATS_P2(ale_pol_match_yellow),
+	GBENU_STATS_P2(tx_mem_protect_err),
+	GBENU_STATS_P2(tx_pri0_drop),
+	GBENU_STATS_P2(tx_pri1_drop),
+	GBENU_STATS_P2(tx_pri2_drop),
+	GBENU_STATS_P2(tx_pri3_drop),
+	GBENU_STATS_P2(tx_pri4_drop),
+	GBENU_STATS_P2(tx_pri5_drop),
+	GBENU_STATS_P2(tx_pri6_drop),
+	GBENU_STATS_P2(tx_pri7_drop),
+	GBENU_STATS_P2(tx_pri0_drop_bcnt),
+	GBENU_STATS_P2(tx_pri1_drop_bcnt),
+	GBENU_STATS_P2(tx_pri2_drop_bcnt),
+	GBENU_STATS_P2(tx_pri3_drop_bcnt),
+	GBENU_STATS_P2(tx_pri4_drop_bcnt),
+	GBENU_STATS_P2(tx_pri5_drop_bcnt),
+	GBENU_STATS_P2(tx_pri6_drop_bcnt),
+	GBENU_STATS_P2(tx_pri7_drop_bcnt),
+	/* GBENU Module 3 */
+	GBENU_STATS_P3(rx_good_frames),
+	GBENU_STATS_P3(rx_broadcast_frames),
+	GBENU_STATS_P3(rx_multicast_frames),
+	GBENU_STATS_P3(rx_pause_frames),
+	GBENU_STATS_P3(rx_crc_errors),
+	GBENU_STATS_P3(rx_align_code_errors),
+	GBENU_STATS_P3(rx_oversized_frames),
+	GBENU_STATS_P3(rx_jabber_frames),
+	GBENU_STATS_P3(rx_undersized_frames),
+	GBENU_STATS_P3(rx_fragments),
+	GBENU_STATS_P3(ale_drop),
+	GBENU_STATS_P3(ale_overrun_drop),
+	GBENU_STATS_P3(rx_bytes),
+	GBENU_STATS_P3(tx_good_frames),
+	GBENU_STATS_P3(tx_broadcast_frames),
+	GBENU_STATS_P3(tx_multicast_frames),
+	GBENU_STATS_P3(tx_pause_frames),
+	GBENU_STATS_P3(tx_deferred_frames),
+	GBENU_STATS_P3(tx_collision_frames),
+	GBENU_STATS_P3(tx_single_coll_frames),
+	GBENU_STATS_P3(tx_mult_coll_frames),
+	GBENU_STATS_P3(tx_excessive_collisions),
+	GBENU_STATS_P3(tx_late_collisions),
+	GBENU_STATS_P3(rx_ipg_error),
+	GBENU_STATS_P3(tx_carrier_sense_errors),
+	GBENU_STATS_P3(tx_bytes),
+	GBENU_STATS_P3(tx_64B_frames),
+	GBENU_STATS_P3(tx_65_to_127B_frames),
+	GBENU_STATS_P3(tx_128_to_255B_frames),
+	GBENU_STATS_P3(tx_256_to_511B_frames),
+	GBENU_STATS_P3(tx_512_to_1023B_frames),
+	GBENU_STATS_P3(tx_1024B_frames),
+	GBENU_STATS_P3(net_bytes),
+	GBENU_STATS_P3(rx_bottom_fifo_drop),
+	GBENU_STATS_P3(rx_port_mask_drop),
+	GBENU_STATS_P3(rx_top_fifo_drop),
+	GBENU_STATS_P3(ale_rate_limit_drop),
+	GBENU_STATS_P3(ale_vid_ingress_drop),
+	GBENU_STATS_P3(ale_da_eq_sa_drop),
+	GBENU_STATS_P3(ale_unknown_ucast),
+	GBENU_STATS_P3(ale_unknown_ucast_bytes),
+	GBENU_STATS_P3(ale_unknown_mcast),
+	GBENU_STATS_P3(ale_unknown_mcast_bytes),
+	GBENU_STATS_P3(ale_unknown_bcast),
+	GBENU_STATS_P3(ale_unknown_bcast_bytes),
+	GBENU_STATS_P3(ale_pol_match),
+	GBENU_STATS_P3(ale_pol_match_red),
+	GBENU_STATS_P3(ale_pol_match_yellow),
+	GBENU_STATS_P3(tx_mem_protect_err),
+	GBENU_STATS_P3(tx_pri0_drop),
+	GBENU_STATS_P3(tx_pri1_drop),
+	GBENU_STATS_P3(tx_pri2_drop),
+	GBENU_STATS_P3(tx_pri3_drop),
+	GBENU_STATS_P3(tx_pri4_drop),
+	GBENU_STATS_P3(tx_pri5_drop),
+	GBENU_STATS_P3(tx_pri6_drop),
+	GBENU_STATS_P3(tx_pri7_drop),
+	GBENU_STATS_P3(tx_pri0_drop_bcnt),
+	GBENU_STATS_P3(tx_pri1_drop_bcnt),
+	GBENU_STATS_P3(tx_pri2_drop_bcnt),
+	GBENU_STATS_P3(tx_pri3_drop_bcnt),
+	GBENU_STATS_P3(tx_pri4_drop_bcnt),
+	GBENU_STATS_P3(tx_pri5_drop_bcnt),
+	GBENU_STATS_P3(tx_pri6_drop_bcnt),
+	GBENU_STATS_P3(tx_pri7_drop_bcnt),
+	/* GBENU Module 4 */
+	GBENU_STATS_P4(rx_good_frames),
+	GBENU_STATS_P4(rx_broadcast_frames),
+	GBENU_STATS_P4(rx_multicast_frames),
+	GBENU_STATS_P4(rx_pause_frames),
+	GBENU_STATS_P4(rx_crc_errors),
+	GBENU_STATS_P4(rx_align_code_errors),
+	GBENU_STATS_P4(rx_oversized_frames),
+	GBENU_STATS_P4(rx_jabber_frames),
+	GBENU_STATS_P4(rx_undersized_frames),
+	GBENU_STATS_P4(rx_fragments),
+	GBENU_STATS_P4(ale_drop),
+	GBENU_STATS_P4(ale_overrun_drop),
+	GBENU_STATS_P4(rx_bytes),
+	GBENU_STATS_P4(tx_good_frames),
+	GBENU_STATS_P4(tx_broadcast_frames),
+	GBENU_STATS_P4(tx_multicast_frames),
+	GBENU_STATS_P4(tx_pause_frames),
+	GBENU_STATS_P4(tx_deferred_frames),
+	GBENU_STATS_P4(tx_collision_frames),
+	GBENU_STATS_P4(tx_single_coll_frames),
+	GBENU_STATS_P4(tx_mult_coll_frames),
+	GBENU_STATS_P4(tx_excessive_collisions),
+	GBENU_STATS_P4(tx_late_collisions),
+	GBENU_STATS_P4(rx_ipg_error),
+	GBENU_STATS_P4(tx_carrier_sense_errors),
+	GBENU_STATS_P4(tx_bytes),
+	GBENU_STATS_P4(tx_64B_frames),
+	GBENU_STATS_P4(tx_65_to_127B_frames),
+	GBENU_STATS_P4(tx_128_to_255B_frames),
+	GBENU_STATS_P4(tx_256_to_511B_frames),
+	GBENU_STATS_P4(tx_512_to_1023B_frames),
+	GBENU_STATS_P4(tx_1024B_frames),
+	GBENU_STATS_P4(net_bytes),
+	GBENU_STATS_P4(rx_bottom_fifo_drop),
+	GBENU_STATS_P4(rx_port_mask_drop),
+	GBENU_STATS_P4(rx_top_fifo_drop),
+	GBENU_STATS_P4(ale_rate_limit_drop),
+	GBENU_STATS_P4(ale_vid_ingress_drop),
+	GBENU_STATS_P4(ale_da_eq_sa_drop),
+	GBENU_STATS_P4(ale_unknown_ucast),
+	GBENU_STATS_P4(ale_unknown_ucast_bytes),
+	GBENU_STATS_P4(ale_unknown_mcast),
+	GBENU_STATS_P4(ale_unknown_mcast_bytes),
+	GBENU_STATS_P4(ale_unknown_bcast),
+	GBENU_STATS_P4(ale_unknown_bcast_bytes),
+	GBENU_STATS_P4(ale_pol_match),
+	GBENU_STATS_P4(ale_pol_match_red),
+	GBENU_STATS_P4(ale_pol_match_yellow),
+	GBENU_STATS_P4(tx_mem_protect_err),
+	GBENU_STATS_P4(tx_pri0_drop),
+	GBENU_STATS_P4(tx_pri1_drop),
+	GBENU_STATS_P4(tx_pri2_drop),
+	GBENU_STATS_P4(tx_pri3_drop),
+	GBENU_STATS_P4(tx_pri4_drop),
+	GBENU_STATS_P4(tx_pri5_drop),
+	GBENU_STATS_P4(tx_pri6_drop),
+	GBENU_STATS_P4(tx_pri7_drop),
+	GBENU_STATS_P4(tx_pri0_drop_bcnt),
+	GBENU_STATS_P4(tx_pri1_drop_bcnt),
+	GBENU_STATS_P4(tx_pri2_drop_bcnt),
+	GBENU_STATS_P4(tx_pri3_drop_bcnt),
+	GBENU_STATS_P4(tx_pri4_drop_bcnt),
+	GBENU_STATS_P4(tx_pri5_drop_bcnt),
+	GBENU_STATS_P4(tx_pri6_drop_bcnt),
+	GBENU_STATS_P4(tx_pri7_drop_bcnt),
+	/* GBENU Module 5 */
+	GBENU_STATS_P5(rx_good_frames),
+	GBENU_STATS_P5(rx_broadcast_frames),
+	GBENU_STATS_P5(rx_multicast_frames),
+	GBENU_STATS_P5(rx_pause_frames),
+	GBENU_STATS_P5(rx_crc_errors),
+	GBENU_STATS_P5(rx_align_code_errors),
+	GBENU_STATS_P5(rx_oversized_frames),
+	GBENU_STATS_P5(rx_jabber_frames),
+	GBENU_STATS_P5(rx_undersized_frames),
+	GBENU_STATS_P5(rx_fragments),
+	GBENU_STATS_P5(ale_drop),
+	GBENU_STATS_P5(ale_overrun_drop),
+	GBENU_STATS_P5(rx_bytes),
+	GBENU_STATS_P5(tx_good_frames),
+	GBENU_STATS_P5(tx_broadcast_frames),
+	GBENU_STATS_P5(tx_multicast_frames),
+	GBENU_STATS_P5(tx_pause_frames),
+	GBENU_STATS_P5(tx_deferred_frames),
+	GBENU_STATS_P5(tx_collision_frames),
+	GBENU_STATS_P5(tx_single_coll_frames),
+	GBENU_STATS_P5(tx_mult_coll_frames),
+	GBENU_STATS_P5(tx_excessive_collisions),
+	GBENU_STATS_P5(tx_late_collisions),
+	GBENU_STATS_P5(rx_ipg_error),
+	GBENU_STATS_P5(tx_carrier_sense_errors),
+	GBENU_STATS_P5(tx_bytes),
+	GBENU_STATS_P5(tx_64B_frames),
+	GBENU_STATS_P5(tx_65_to_127B_frames),
+	GBENU_STATS_P5(tx_128_to_255B_frames),
+	GBENU_STATS_P5(tx_256_to_511B_frames),
+	GBENU_STATS_P5(tx_512_to_1023B_frames),
+	GBENU_STATS_P5(tx_1024B_frames),
+	GBENU_STATS_P5(net_bytes),
+	GBENU_STATS_P5(rx_bottom_fifo_drop),
+	GBENU_STATS_P5(rx_port_mask_drop),
+	GBENU_STATS_P5(rx_top_fifo_drop),
+	GBENU_STATS_P5(ale_rate_limit_drop),
+	GBENU_STATS_P5(ale_vid_ingress_drop),
+	GBENU_STATS_P5(ale_da_eq_sa_drop),
+	GBENU_STATS_P5(ale_unknown_ucast),
+	GBENU_STATS_P5(ale_unknown_ucast_bytes),
+	GBENU_STATS_P5(ale_unknown_mcast),
+	GBENU_STATS_P5(ale_unknown_mcast_bytes),
+	GBENU_STATS_P5(ale_unknown_bcast),
+	GBENU_STATS_P5(ale_unknown_bcast_bytes),
+	GBENU_STATS_P5(ale_pol_match),
+	GBENU_STATS_P5(ale_pol_match_red),
+	GBENU_STATS_P5(ale_pol_match_yellow),
+	GBENU_STATS_P5(tx_mem_protect_err),
+	GBENU_STATS_P5(tx_pri0_drop),
+	GBENU_STATS_P5(tx_pri1_drop),
+	GBENU_STATS_P5(tx_pri2_drop),
+	GBENU_STATS_P5(tx_pri3_drop),
+	GBENU_STATS_P5(tx_pri4_drop),
+	GBENU_STATS_P5(tx_pri5_drop),
+	GBENU_STATS_P5(tx_pri6_drop),
+	GBENU_STATS_P5(tx_pri7_drop),
+	GBENU_STATS_P5(tx_pri0_drop_bcnt),
+	GBENU_STATS_P5(tx_pri1_drop_bcnt),
+	GBENU_STATS_P5(tx_pri2_drop_bcnt),
+	GBENU_STATS_P5(tx_pri3_drop_bcnt),
+	GBENU_STATS_P5(tx_pri4_drop_bcnt),
+	GBENU_STATS_P5(tx_pri5_drop_bcnt),
+	GBENU_STATS_P5(tx_pri6_drop_bcnt),
+	GBENU_STATS_P5(tx_pri7_drop_bcnt),
+	/* GBENU Module 6 */
+	GBENU_STATS_P6(rx_good_frames),
+	GBENU_STATS_P6(rx_broadcast_frames),
+	GBENU_STATS_P6(rx_multicast_frames),
+	GBENU_STATS_P6(rx_pause_frames),
+	GBENU_STATS_P6(rx_crc_errors),
+	GBENU_STATS_P6(rx_align_code_errors),
+	GBENU_STATS_P6(rx_oversized_frames),
+	GBENU_STATS_P6(rx_jabber_frames),
+	GBENU_STATS_P6(rx_undersized_frames),
+	GBENU_STATS_P6(rx_fragments),
+	GBENU_STATS_P6(ale_drop),
+	GBENU_STATS_P6(ale_overrun_drop),
+	GBENU_STATS_P6(rx_bytes),
+	GBENU_STATS_P6(tx_good_frames),
+	GBENU_STATS_P6(tx_broadcast_frames),
+	GBENU_STATS_P6(tx_multicast_frames),
+	GBENU_STATS_P6(tx_pause_frames),
+	GBENU_STATS_P6(tx_deferred_frames),
+	GBENU_STATS_P6(tx_collision_frames),
+	GBENU_STATS_P6(tx_single_coll_frames),
+	GBENU_STATS_P6(tx_mult_coll_frames),
+	GBENU_STATS_P6(tx_excessive_collisions),
+	GBENU_STATS_P6(tx_late_collisions),
+	GBENU_STATS_P6(rx_ipg_error),
+	GBENU_STATS_P6(tx_carrier_sense_errors),
+	GBENU_STATS_P6(tx_bytes),
+	GBENU_STATS_P6(tx_64B_frames),
+	GBENU_STATS_P6(tx_65_to_127B_frames),
+	GBENU_STATS_P6(tx_128_to_255B_frames),
+	GBENU_STATS_P6(tx_256_to_511B_frames),
+	GBENU_STATS_P6(tx_512_to_1023B_frames),
+	GBENU_STATS_P6(tx_1024B_frames),
+	GBENU_STATS_P6(net_bytes),
+	GBENU_STATS_P6(rx_bottom_fifo_drop),
+	GBENU_STATS_P6(rx_port_mask_drop),
+	GBENU_STATS_P6(rx_top_fifo_drop),
+	GBENU_STATS_P6(ale_rate_limit_drop),
+	GBENU_STATS_P6(ale_vid_ingress_drop),
+	GBENU_STATS_P6(ale_da_eq_sa_drop),
+	GBENU_STATS_P6(ale_unknown_ucast),
+	GBENU_STATS_P6(ale_unknown_ucast_bytes),
+	GBENU_STATS_P6(ale_unknown_mcast),
+	GBENU_STATS_P6(ale_unknown_mcast_bytes),
+	GBENU_STATS_P6(ale_unknown_bcast),
+	GBENU_STATS_P6(ale_unknown_bcast_bytes),
+	GBENU_STATS_P6(ale_pol_match),
+	GBENU_STATS_P6(ale_pol_match_red),
+	GBENU_STATS_P6(ale_pol_match_yellow),
+	GBENU_STATS_P6(tx_mem_protect_err),
+	GBENU_STATS_P6(tx_pri0_drop),
+	GBENU_STATS_P6(tx_pri1_drop),
+	GBENU_STATS_P6(tx_pri2_drop),
+	GBENU_STATS_P6(tx_pri3_drop),
+	GBENU_STATS_P6(tx_pri4_drop),
+	GBENU_STATS_P6(tx_pri5_drop),
+	GBENU_STATS_P6(tx_pri6_drop),
+	GBENU_STATS_P6(tx_pri7_drop),
+	GBENU_STATS_P6(tx_pri0_drop_bcnt),
+	GBENU_STATS_P6(tx_pri1_drop_bcnt),
+	GBENU_STATS_P6(tx_pri2_drop_bcnt),
+	GBENU_STATS_P6(tx_pri3_drop_bcnt),
+	GBENU_STATS_P6(tx_pri4_drop_bcnt),
+	GBENU_STATS_P6(tx_pri5_drop_bcnt),
+	GBENU_STATS_P6(tx_pri6_drop_bcnt),
+	GBENU_STATS_P6(tx_pri7_drop_bcnt),
+	/* GBENU Module 7 */
+	GBENU_STATS_P7(rx_good_frames),
+	GBENU_STATS_P7(rx_broadcast_frames),
+	GBENU_STATS_P7(rx_multicast_frames),
+	GBENU_STATS_P7(rx_pause_frames),
+	GBENU_STATS_P7(rx_crc_errors),
+	GBENU_STATS_P7(rx_align_code_errors),
+	GBENU_STATS_P7(rx_oversized_frames),
+	GBENU_STATS_P7(rx_jabber_frames),
+	GBENU_STATS_P7(rx_undersized_frames),
+	GBENU_STATS_P7(rx_fragments),
+	GBENU_STATS_P7(ale_drop),
+	GBENU_STATS_P7(ale_overrun_drop),
+	GBENU_STATS_P7(rx_bytes),
+	GBENU_STATS_P7(tx_good_frames),
+	GBENU_STATS_P7(tx_broadcast_frames),
+	GBENU_STATS_P7(tx_multicast_frames),
+	GBENU_STATS_P7(tx_pause_frames),
+	GBENU_STATS_P7(tx_deferred_frames),
+	GBENU_STATS_P7(tx_collision_frames),
+	GBENU_STATS_P7(tx_single_coll_frames),
+	GBENU_STATS_P7(tx_mult_coll_frames),
+	GBENU_STATS_P7(tx_excessive_collisions),
+	GBENU_STATS_P7(tx_late_collisions),
+	GBENU_STATS_P7(rx_ipg_error),
+	GBENU_STATS_P7(tx_carrier_sense_errors),
+	GBENU_STATS_P7(tx_bytes),
+	GBENU_STATS_P7(tx_64B_frames),
+	GBENU_STATS_P7(tx_65_to_127B_frames),
+	GBENU_STATS_P7(tx_128_to_255B_frames),
+	GBENU_STATS_P7(tx_256_to_511B_frames),
+	GBENU_STATS_P7(tx_512_to_1023B_frames),
+	GBENU_STATS_P7(tx_1024B_frames),
+	GBENU_STATS_P7(net_bytes),
+	GBENU_STATS_P7(rx_bottom_fifo_drop),
+	GBENU_STATS_P7(rx_port_mask_drop),
+	GBENU_STATS_P7(rx_top_fifo_drop),
+	GBENU_STATS_P7(ale_rate_limit_drop),
+	GBENU_STATS_P7(ale_vid_ingress_drop),
+	GBENU_STATS_P7(ale_da_eq_sa_drop),
+	GBENU_STATS_P7(ale_unknown_ucast),
+	GBENU_STATS_P7(ale_unknown_ucast_bytes),
+	GBENU_STATS_P7(ale_unknown_mcast),
+	GBENU_STATS_P7(ale_unknown_mcast_bytes),
+	GBENU_STATS_P7(ale_unknown_bcast),
+	GBENU_STATS_P7(ale_unknown_bcast_bytes),
+	GBENU_STATS_P7(ale_pol_match),
+	GBENU_STATS_P7(ale_pol_match_red),
+	GBENU_STATS_P7(ale_pol_match_yellow),
+	GBENU_STATS_P7(tx_mem_protect_err),
+	GBENU_STATS_P7(tx_pri0_drop),
+	GBENU_STATS_P7(tx_pri1_drop),
+	GBENU_STATS_P7(tx_pri2_drop),
+	GBENU_STATS_P7(tx_pri3_drop),
+	GBENU_STATS_P7(tx_pri4_drop),
+	GBENU_STATS_P7(tx_pri5_drop),
+	GBENU_STATS_P7(tx_pri6_drop),
+	GBENU_STATS_P7(tx_pri7_drop),
+	GBENU_STATS_P7(tx_pri0_drop_bcnt),
+	GBENU_STATS_P7(tx_pri1_drop_bcnt),
+	GBENU_STATS_P7(tx_pri2_drop_bcnt),
+	GBENU_STATS_P7(tx_pri3_drop_bcnt),
+	GBENU_STATS_P7(tx_pri4_drop_bcnt),
+	GBENU_STATS_P7(tx_pri5_drop_bcnt),
+	GBENU_STATS_P7(tx_pri6_drop_bcnt),
+	GBENU_STATS_P7(tx_pri7_drop_bcnt),
+	/* GBENU Module 8 */
+	GBENU_STATS_P8(rx_good_frames),
+	GBENU_STATS_P8(rx_broadcast_frames),
+	GBENU_STATS_P8(rx_multicast_frames),
+	GBENU_STATS_P8(rx_pause_frames),
+	GBENU_STATS_P8(rx_crc_errors),
+	GBENU_STATS_P8(rx_align_code_errors),
+	GBENU_STATS_P8(rx_oversized_frames),
+	GBENU_STATS_P8(rx_jabber_frames),
+	GBENU_STATS_P8(rx_undersized_frames),
+	GBENU_STATS_P8(rx_fragments),
+	GBENU_STATS_P8(ale_drop),
+	GBENU_STATS_P8(ale_overrun_drop),
+	GBENU_STATS_P8(rx_bytes),
+	GBENU_STATS_P8(tx_good_frames),
+	GBENU_STATS_P8(tx_broadcast_frames),
+	GBENU_STATS_P8(tx_multicast_frames),
+	GBENU_STATS_P8(tx_pause_frames),
+	GBENU_STATS_P8(tx_deferred_frames),
+	GBENU_STATS_P8(tx_collision_frames),
+	GBENU_STATS_P8(tx_single_coll_frames),
+	GBENU_STATS_P8(tx_mult_coll_frames),
+	GBENU_STATS_P8(tx_excessive_collisions),
+	GBENU_STATS_P8(tx_late_collisions),
+	GBENU_STATS_P8(rx_ipg_error),
+	GBENU_STATS_P8(tx_carrier_sense_errors),
+	GBENU_STATS_P8(tx_bytes),
+	GBENU_STATS_P8(tx_64B_frames),
+	GBENU_STATS_P8(tx_65_to_127B_frames),
+	GBENU_STATS_P8(tx_128_to_255B_frames),
+	GBENU_STATS_P8(tx_256_to_511B_frames),
+	GBENU_STATS_P8(tx_512_to_1023B_frames),
+	GBENU_STATS_P8(tx_1024B_frames),
+	GBENU_STATS_P8(net_bytes),
+	GBENU_STATS_P8(rx_bottom_fifo_drop),
+	GBENU_STATS_P8(rx_port_mask_drop),
+	GBENU_STATS_P8(rx_top_fifo_drop),
+	GBENU_STATS_P8(ale_rate_limit_drop),
+	GBENU_STATS_P8(ale_vid_ingress_drop),
+	GBENU_STATS_P8(ale_da_eq_sa_drop),
+	GBENU_STATS_P8(ale_unknown_ucast),
+	GBENU_STATS_P8(ale_unknown_ucast_bytes),
+	GBENU_STATS_P8(ale_unknown_mcast),
+	GBENU_STATS_P8(ale_unknown_mcast_bytes),
+	GBENU_STATS_P8(ale_unknown_bcast),
+	GBENU_STATS_P8(ale_unknown_bcast_bytes),
+	GBENU_STATS_P8(ale_pol_match),
+	GBENU_STATS_P8(ale_pol_match_red),
+	GBENU_STATS_P8(ale_pol_match_yellow),
+	GBENU_STATS_P8(tx_mem_protect_err),
+	GBENU_STATS_P8(tx_pri0_drop),
+	GBENU_STATS_P8(tx_pri1_drop),
+	GBENU_STATS_P8(tx_pri2_drop),
+	GBENU_STATS_P8(tx_pri3_drop),
+	GBENU_STATS_P8(tx_pri4_drop),
+	GBENU_STATS_P8(tx_pri5_drop),
+	GBENU_STATS_P8(tx_pri6_drop),
+	GBENU_STATS_P8(tx_pri7_drop),
+	GBENU_STATS_P8(tx_pri0_drop_bcnt),
+	GBENU_STATS_P8(tx_pri1_drop_bcnt),
+	GBENU_STATS_P8(tx_pri2_drop_bcnt),
+	GBENU_STATS_P8(tx_pri3_drop_bcnt),
+	GBENU_STATS_P8(tx_pri4_drop_bcnt),
+	GBENU_STATS_P8(tx_pri5_drop_bcnt),
+	GBENU_STATS_P8(tx_pri6_drop_bcnt),
+	GBENU_STATS_P8(tx_pri7_drop_bcnt),
+};
+
+#define XGBE_STATS0_INFO(field)				\
+{							\
+	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
+	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
+	offsetof(struct xgbe_hw_stats, field)		\
+}
+
+#define XGBE_STATS1_INFO(field)				\
+{							\
+	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
+	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
+	offsetof(struct xgbe_hw_stats, field)		\
+}
+
+#define XGBE_STATS2_INFO(field)				\
+{							\
+	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
+	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
+	offsetof(struct xgbe_hw_stats, field)		\
+}
+
+static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
+	/* XGBE module 0 */
+	XGBE_STATS0_INFO(rx_good_frames),
+	XGBE_STATS0_INFO(rx_broadcast_frames),
+	XGBE_STATS0_INFO(rx_multicast_frames),
+	XGBE_STATS0_INFO(rx_oversized_frames),
+	XGBE_STATS0_INFO(rx_undersized_frames),
+	XGBE_STATS0_INFO(overrun_type4),
+	XGBE_STATS0_INFO(overrun_type5),
+	XGBE_STATS0_INFO(rx_bytes),
+	XGBE_STATS0_INFO(tx_good_frames),
+	XGBE_STATS0_INFO(tx_broadcast_frames),
+	XGBE_STATS0_INFO(tx_multicast_frames),
+	XGBE_STATS0_INFO(tx_bytes),
+	XGBE_STATS0_INFO(tx_64byte_frames),
+	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
+	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
+	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
+	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
+	XGBE_STATS0_INFO(tx_1024byte_frames),
+	XGBE_STATS0_INFO(net_bytes),
+	XGBE_STATS0_INFO(rx_sof_overruns),
+	XGBE_STATS0_INFO(rx_mof_overruns),
+	XGBE_STATS0_INFO(rx_dma_overruns),
+	/* XGBE module 1 */
+	XGBE_STATS1_INFO(rx_good_frames),
+	XGBE_STATS1_INFO(rx_broadcast_frames),
+	XGBE_STATS1_INFO(rx_multicast_frames),
+	XGBE_STATS1_INFO(rx_pause_frames),
+	XGBE_STATS1_INFO(rx_crc_errors),
+	XGBE_STATS1_INFO(rx_align_code_errors),
+	XGBE_STATS1_INFO(rx_oversized_frames),
+	XGBE_STATS1_INFO(rx_jabber_frames),
+	XGBE_STATS1_INFO(rx_undersized_frames),
+	XGBE_STATS1_INFO(rx_fragments),
+	XGBE_STATS1_INFO(overrun_type4),
+	XGBE_STATS1_INFO(overrun_type5),
+	XGBE_STATS1_INFO(rx_bytes),
+	XGBE_STATS1_INFO(tx_good_frames),
+	XGBE_STATS1_INFO(tx_broadcast_frames),
+	XGBE_STATS1_INFO(tx_multicast_frames),
+	XGBE_STATS1_INFO(tx_pause_frames),
+	XGBE_STATS1_INFO(tx_deferred_frames),
+	XGBE_STATS1_INFO(tx_collision_frames),
+	XGBE_STATS1_INFO(tx_single_coll_frames),
+	XGBE_STATS1_INFO(tx_mult_coll_frames),
+	XGBE_STATS1_INFO(tx_excessive_collisions),
+	XGBE_STATS1_INFO(tx_late_collisions),
+	XGBE_STATS1_INFO(tx_underrun),
+	XGBE_STATS1_INFO(tx_carrier_sense_errors),
+	XGBE_STATS1_INFO(tx_bytes),
+	XGBE_STATS1_INFO(tx_64byte_frames),
+	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
+	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
+	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
+	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
+	XGBE_STATS1_INFO(tx_1024byte_frames),
+	XGBE_STATS1_INFO(net_bytes),
+	XGBE_STATS1_INFO(rx_sof_overruns),
+	XGBE_STATS1_INFO(rx_mof_overruns),
+	XGBE_STATS1_INFO(rx_dma_overruns),
+	/* XGBE module 2 */
+	XGBE_STATS2_INFO(rx_good_frames),
+	XGBE_STATS2_INFO(rx_broadcast_frames),
+	XGBE_STATS2_INFO(rx_multicast_frames),
+	XGBE_STATS2_INFO(rx_pause_frames),
+	XGBE_STATS2_INFO(rx_crc_errors),
+	XGBE_STATS2_INFO(rx_align_code_errors),
+	XGBE_STATS2_INFO(rx_oversized_frames),
+	XGBE_STATS2_INFO(rx_jabber_frames),
+	XGBE_STATS2_INFO(rx_undersized_frames),
+	XGBE_STATS2_INFO(rx_fragments),
+	XGBE_STATS2_INFO(overrun_type4),
+	XGBE_STATS2_INFO(overrun_type5),
+	XGBE_STATS2_INFO(rx_bytes),
+	XGBE_STATS2_INFO(tx_good_frames),
+	XGBE_STATS2_INFO(tx_broadcast_frames),
+	XGBE_STATS2_INFO(tx_multicast_frames),
+	XGBE_STATS2_INFO(tx_pause_frames),
+	XGBE_STATS2_INFO(tx_deferred_frames),
+	XGBE_STATS2_INFO(tx_collision_frames),
+	XGBE_STATS2_INFO(tx_single_coll_frames),
+	XGBE_STATS2_INFO(tx_mult_coll_frames),
+	XGBE_STATS2_INFO(tx_excessive_collisions),
+	XGBE_STATS2_INFO(tx_late_collisions),
+	XGBE_STATS2_INFO(tx_underrun),
+	XGBE_STATS2_INFO(tx_carrier_sense_errors),
+	XGBE_STATS2_INFO(tx_bytes),
+	XGBE_STATS2_INFO(tx_64byte_frames),
+	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
+	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
+	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
+	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
+	XGBE_STATS2_INFO(tx_1024byte_frames),
+	XGBE_STATS2_INFO(net_bytes),
+	XGBE_STATS2_INFO(rx_sof_overruns),
+	XGBE_STATS2_INFO(rx_mof_overruns),
+	XGBE_STATS2_INFO(rx_dma_overruns),
+};
+
+#define for_each_intf(i, priv) \
+	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
+
+#define for_each_sec_slave(slave, priv) \
+	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
+
+#define first_sec_slave(priv)					\
+	list_first_entry(&priv->secondary_slaves, \
+			struct gbe_slave, slave_list)
+
+static void keystone_get_drvinfo(struct net_device *ndev,
+				 struct ethtool_drvinfo *info)
+{
+	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
+	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
+}
+
+static u32 keystone_get_msglevel(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+
+	return netcp->msg_enable;
+}
+
+static void keystone_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+
+	netcp->msg_enable = value;
+}
+
+static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
+{
+	struct gbe_intf *gbe_intf;
+
+	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+	if (!gbe_intf)
+		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+
+	return gbe_intf;
+}
+
+static void keystone_get_stat_strings(struct net_device *ndev,
+				      uint32_t stringset, uint8_t *data)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_intf *gbe_intf;
+	struct gbe_priv *gbe_dev;
+	int i;
+
+	gbe_intf = keystone_get_intf_data(netcp);
+	if (!gbe_intf)
+		return;
+	gbe_dev = gbe_intf->gbe_dev;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < gbe_dev->num_et_stats; i++) {
+			memcpy(data, gbe_dev->et_stats[i].desc,
+			       ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	case ETH_SS_TEST:
+		break;
+	}
+}
+
+static int keystone_get_sset_count(struct net_device *ndev, int stringset)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_intf *gbe_intf;
+	struct gbe_priv *gbe_dev;
+
+	gbe_intf = keystone_get_intf_data(netcp);
+	if (!gbe_intf)
+		return -EINVAL;
+	gbe_dev = gbe_intf->gbe_dev;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		return 0;
+	case ETH_SS_STATS:
+		return gbe_dev->num_et_stats;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+	u32  __iomem *p_stats_entry;
+	int i;
+
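+	/* Stats are "reset" in software only: clear the accumulator and
+	 * re-snapshot the hardware counter so subsequent deltas start
+	 * from the current value.
+	 */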
+	for (i = 0; i < gbe_dev->num_et_stats; i++) {
+		if (gbe_dev->et_stats[i].type == stats_mod) {
+			p_stats_entry = base + gbe_dev->et_stats[i].offset;
+			gbe_dev->hw_stats[i] = 0;
+			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+		}
+	}
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+					     int et_stats_entry)
+{
+	void __iomem *base = NULL;
+	u32  __iomem *p_stats_entry;
+	u32 curr, delta;
+
+	/* The hw_stats_regs pointers are already set up to point at the
+	 * correct per-module stats base, so only the offset is needed.
+	 */
+	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+	curr = readl(p_stats_entry);
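+	/* Unsigned subtraction yields the right delta even when the
+	 * 32-bit hardware counter has wrapped since the last snapshot.
+	 */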
+	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+	gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+	int i;
+
+	for (i = 0; i < gbe_dev->num_et_stats; i++) {
+		gbe_update_hw_stats_entry(gbe_dev, i);
+
+		if (data)
+			data[i] = gbe_dev->hw_stats[i];
+	}
+}
+
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+					       int stats_mod)
+{
+	u32 val;
+
+	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
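+	/* Ver 1.4 exposes only two stats register windows; CD_SEL banks
+	 * modules C/D into the window otherwise used for A/B.
+	 */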
+	switch (stats_mod) {
+	case GBE_STATSA_MODULE:
+	case GBE_STATSB_MODULE:
+		val &= ~GBE_STATS_CD_SEL;
+		break;
+	case GBE_STATSC_MODULE:
+	case GBE_STATSD_MODULE:
+		val |= GBE_STATS_CD_SEL;
+		break;
+	default:
+		return;
+	}
+
+	/* make the stat module visible */
+	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
+
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+	gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+	int et_entry, j, pair;
+
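+	/* gbe13_et_stats lists modules A, B, C, D in order: the first
+	 * half of the entries is read with A/B visible, the second half
+	 * with C/D banked in.
+	 */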
+	for (pair = 0; pair < 2; pair++) {
+		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+						      GBE_STATSC_MODULE :
+						      GBE_STATSA_MODULE));
+
+		for (j = 0; j < half_num_et_stats; j++) {
+			et_entry = pair * half_num_et_stats + j;
+			gbe_update_hw_stats_entry(gbe_dev, et_entry);
+
+			if (data)
+				data[et_entry] = gbe_dev->hw_stats[et_entry];
+		}
+	}
+}
+
+static void keystone_get_ethtool_stats(struct net_device *ndev,
+				       struct ethtool_stats *stats,
+				       uint64_t *data)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_intf *gbe_intf;
+	struct gbe_priv *gbe_dev;
+
+	gbe_intf = keystone_get_intf_data(netcp);
+	if (!gbe_intf)
+		return;
+
+	gbe_dev = gbe_intf->gbe_dev;
+	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	if (IS_SS_ID_VER_14(gbe_dev))
+		gbe_update_stats_ver14(gbe_dev, data);
+	else
+		gbe_update_stats(gbe_dev, data);
+	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+}
+
+static int keystone_get_link_ksettings(struct net_device *ndev,
+				       struct ethtool_link_ksettings *cmd)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct phy_device *phy = ndev->phydev;
+	struct gbe_intf *gbe_intf;
+
+	if (!phy)
+		return -EINVAL;
+
+	gbe_intf = keystone_get_intf_data(netcp);
+	if (!gbe_intf)
+		return -EINVAL;
+
+	if (!gbe_intf->slave)
+		return -EINVAL;
+
+	phy_ethtool_ksettings_get(phy, cmd);
+	cmd->base.port = gbe_intf->slave->phy_port_t;
+
+	return 0;
+}
+
+static int keystone_set_link_ksettings(struct net_device *ndev,
+				       const struct ethtool_link_ksettings *cmd)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct phy_device *phy = ndev->phydev;
+	struct gbe_intf *gbe_intf;
+	u8 port = cmd->base.port;
+	u32 advertising, supported;
+	u32 features;
+
+	ethtool_convert_link_mode_to_legacy_u32(&advertising,
+						cmd->link_modes.advertising);
+	ethtool_convert_link_mode_to_legacy_u32(&supported,
+						cmd->link_modes.supported);
+	features = advertising & supported;
+
+	if (!phy)
+		return -EINVAL;
+
+	gbe_intf = keystone_get_intf_data(netcp);
+	if (!gbe_intf)
+		return -EINVAL;
+
+	if (!gbe_intf->slave)
+		return -EINVAL;
+
+	if (port != gbe_intf->slave->phy_port_t) {
+		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
+			return -EINVAL;
+
+		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
+			return -EINVAL;
+
+		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
+			return -EINVAL;
+
+		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
+			return -EINVAL;
+
+		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
+			return -EINVAL;
+	}
+
+	gbe_intf->slave->phy_port_t = port;
+	return phy_ethtool_ksettings_set(phy, cmd);
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static int keystone_get_ts_info(struct net_device *ndev,
+				struct ethtool_ts_info *info)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_intf *gbe_intf;
+
+	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
+		return -EINVAL;
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+	return 0;
+}
+#else
+static int keystone_get_ts_info(struct net_device *ndev,
+				struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE;
+	info->phc_index = -1;
+	info->tx_types = 0;
+	info->rx_filters = 0;
+	return 0;
+}
+#endif /* CONFIG_TI_CPTS */
+
+static const struct ethtool_ops keystone_ethtool_ops = {
+	.get_drvinfo		= keystone_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_msglevel		= keystone_get_msglevel,
+	.set_msglevel		= keystone_set_msglevel,
+	.get_strings		= keystone_get_stat_strings,
+	.get_sset_count		= keystone_get_sset_count,
+	.get_ethtool_stats	= keystone_get_ethtool_stats,
+	.get_link_ksettings	= keystone_get_link_ksettings,
+	.set_link_ksettings	= keystone_set_link_ksettings,
+	.get_ts_info		= keystone_get_ts_info,
+};
+
+static void gbe_set_slave_mac(struct gbe_slave *slave,
+			      struct gbe_intf *gbe_intf)
+{
+	struct net_device *ndev = gbe_intf->ndev;
+
+	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
+	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
+}
+
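+/* Slave numbers are zero based.  When the host occupies switch port 0
+ * (the usual case), slave N sits behind switch port N + 1, e.g.
+ * slave 0 is port 1.
+ */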
+static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
+{
+	if (priv->host_port == 0)
+		return slave_num + 1;
+
+	return slave_num;
+}
+
+static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
+					  struct net_device *ndev,
+					  struct gbe_slave *slave,
+					  int up)
+{
+	struct phy_device *phy = slave->phy;
+	u32 mac_control = 0;
+
+	if (up) {
+		mac_control = slave->mac_control;
+		if (phy && (phy->speed == SPEED_1000)) {
+			mac_control |= MACSL_GIG_MODE;
+			mac_control &= ~MACSL_XGIG_MODE;
+		} else if (phy && (phy->speed == SPEED_10000)) {
+			mac_control |= MACSL_XGIG_MODE;
+			mac_control &= ~MACSL_GIG_MODE;
+		}
+
+		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+						 mac_control));
+
+		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+				     ALE_PORT_STATE,
+				     ALE_PORT_STATE_FORWARD);
+
+		if (ndev && slave->open &&
+		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
+			netif_carrier_on(ndev);
+	} else {
+		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+						 mac_control));
+		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+				     ALE_PORT_STATE,
+				     ALE_PORT_STATE_DISABLE);
+		if (ndev &&
+		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
+			netif_carrier_off(ndev);
+	}
+
+	if (phy)
+		phy_print_status(phy);
+}
+
+static bool gbe_phy_link_status(struct gbe_slave *slave)
+{
+	return !slave->phy || slave->phy->link;
+}
+
+#define RGMII_REG_STATUS_LINK	BIT(0)
+
+static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
+{
+	u32 val = 0;
+
+	val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
+	*status = !!(val & RGMII_REG_STATUS_LINK);
+}
+
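+/* Combine the switch-level (SGMII/RGMII status) link state with the
+ * PHY link state; the port is considered up only when both agree.
+ * The atomic_xchg() ensures the link state action runs exactly once
+ * per transition even though this is called from both the polling
+ * timer and the phylib adjust_link callbacks.
+ */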
+static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
+					  struct gbe_slave *slave,
+					  struct net_device *ndev)
+{
+	bool sw_link_state = true, phy_link_state;
+	int sp = slave->slave_num, link_state;
+
+	if (!slave->open)
+		return;
+
+	if (SLAVE_LINK_IS_RGMII(slave))
+		netcp_2u_rgmii_get_port_link(gbe_dev,
+					     &sw_link_state);
+	if (SLAVE_LINK_IS_SGMII(slave))
+		sw_link_state =
+		netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
+
+	phy_link_state = gbe_phy_link_status(slave);
+	link_state = phy_link_state & sw_link_state;
+
+	if (atomic_xchg(&slave->link_state, link_state) != link_state)
+		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
+					      link_state);
+}
+
+static void xgbe_adjust_link(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_intf *gbe_intf;
+
+	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+	if (!gbe_intf)
+		return;
+
+	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+				      ndev);
+}
+
+static void gbe_adjust_link(struct net_device *ndev)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_intf *gbe_intf;
+
+	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+	if (!gbe_intf)
+		return;
+
+	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+				      ndev);
+}
+
+static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
+{
+	struct gbe_priv *gbe_dev = netdev_priv(ndev);
+	struct gbe_slave *slave;
+
+	for_each_sec_slave(slave, gbe_dev)
+		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+}
+
+/* Reset EMAC
+ * Soft reset is set and polled until clear, or until a timeout occurs
+ */
+static int gbe_port_reset(struct gbe_slave *slave)
+{
+	u32 i, v;
+
+	/* Set the soft reset bit */
+	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
+
+	/* Wait for the bit to clear */
+	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
+		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
+			return 0;
+	}
+
+	/* Timeout on the reset */
+	return GMACSL_RET_WARN_RESET_INCOMPLETE;
+}
+
+/* Configure EMAC */
+static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+			    int max_rx_len)
+{
+	void __iomem *rx_maxlen_reg;
+	u32 xgmii_mode;
+
+	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
+		max_rx_len = NETCP_MAX_FRAME_SIZE;
+
+	/* Enable correct MII mode at SS level */
+	if (IS_SS_ID_XGBE(gbe_dev) &&
+	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
+		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
+		xgmii_mode |= (1 << slave->slave_num);
+		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
+	}
+
+	if (IS_SS_ID_MU(gbe_dev))
+		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
+	else
+		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
+
+	writel(max_rx_len, rx_maxlen_reg);
+	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
+}
+
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+			      struct gbe_slave *slave, bool set)
+{
+	if (SLAVE_LINK_IS_XGMII(slave))
+		return;
+
+	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
+			    slave->slave_num, set);
+}
+
+static void gbe_slave_stop(struct gbe_intf *intf)
+{
+	struct gbe_priv *gbe_dev = intf->gbe_dev;
+	struct gbe_slave *slave = intf->slave;
+
+	if (!IS_SS_ID_2U(gbe_dev))
+		gbe_sgmii_rtreset(gbe_dev, slave, true);
+	gbe_port_reset(slave);
+	/* Disable forwarding */
+	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
+			   1 << slave->port_num, 0, 0);
+
+	if (!slave->phy)
+		return;
+
+	phy_stop(slave->phy);
+	phy_disconnect(slave->phy);
+	slave->phy = NULL;
+}
+
+static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
+{
+	if (SLAVE_LINK_IS_XGMII(slave))
+		return;
+
+	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
+	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
+			   slave->link_interface);
+}
+
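+/* Per-slave open sequence: (re)configure SGMII where applicable,
+ * soft-reset the port, program the MAC address and RX max length,
+ * enable ALE forwarding and, for the MAC-PHY link interfaces,
+ * connect and start the PHY with the matching adjust_link handler.
+ */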
+static int gbe_slave_open(struct gbe_intf *gbe_intf)
+{
+	struct gbe_priv *priv = gbe_intf->gbe_dev;
+	struct gbe_slave *slave = gbe_intf->slave;
+	phy_interface_t phy_mode;
+	bool has_phy = false;
+
+	void (*hndlr)(struct net_device *) = gbe_adjust_link;
+
+	if (!IS_SS_ID_2U(priv))
+		gbe_sgmii_config(priv, slave);
+	gbe_port_reset(slave);
+	if (!IS_SS_ID_2U(priv))
+		gbe_sgmii_rtreset(priv, slave, false);
+	gbe_port_config(priv, slave, priv->rx_packet_max);
+	gbe_set_slave_mac(slave, gbe_intf);
+	/* For NU & 2U switch, map the vlan priorities to zero
+	 * as we only configure to use priority 0
+	 */
+	if (IS_SS_ID_MU(priv))
+		writel(HOST_TX_PRI_MAP_DEFAULT,
+		       GBE_REG_ADDR(slave, port_regs, rx_pri_map));
+
+	/* enable forwarding */
+	cpsw_ale_control_set(priv->ale, slave->port_num,
+			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
+			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
+
+	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+		has_phy = true;
+		phy_mode = PHY_INTERFACE_MODE_SGMII;
+		slave->phy_port_t = PORT_MII;
+	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
+		has_phy = true;
+		phy_mode = of_get_phy_mode(slave->node);
+		/* if phy-mode is not present, default to
+		 * PHY_INTERFACE_MODE_RGMII
+		 */
+		if (phy_mode < 0)
+			phy_mode = PHY_INTERFACE_MODE_RGMII;
+
+		if (!phy_interface_mode_is_rgmii(phy_mode)) {
+			dev_err(priv->dev,
+				"Unsupported phy mode %d\n", phy_mode);
+			return -EINVAL;
+		}
+		slave->phy_port_t = PORT_MII;
+	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
+		has_phy = true;
+		phy_mode = PHY_INTERFACE_MODE_NA;
+		slave->phy_port_t = PORT_FIBRE;
+	}
+
+	if (has_phy) {
+		if (IS_SS_ID_XGBE(priv))
+			hndlr = xgbe_adjust_link;
+
+		slave->phy = of_phy_connect(gbe_intf->ndev,
+					    slave->phy_node,
+					    hndlr, 0,
+					    phy_mode);
+		if (!slave->phy) {
+			dev_err(priv->dev, "phy not found on slave %d\n",
+				slave->slave_num);
+			return -ENODEV;
+		}
+		dev_dbg(priv->dev, "phy found: id is: %s\n",
+			phydev_name(slave->phy));
+		phy_start(slave->phy);
+	}
+	return 0;
+}
+
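+/* Bring up the host port: program the default priority maps and RX
+ * max length, start the ALE and set up its flood/forward masks.  ALE
+ * bypass stays enabled unless the "enable-ale" DT property was seen
+ * at probe time, in which case the switch does real address learning
+ * and forwarding.
+ */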
+static void gbe_init_host_port(struct gbe_priv *priv)
+{
+	int bypass_en = 1;
+
+	/* Host Tx Pri */
+	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
+		writel(HOST_TX_PRI_MAP_DEFAULT,
+		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
+
+	/* Max length register */
+	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
+						  rx_maxlen));
+
+	cpsw_ale_start(priv->ale);
+
+	if (priv->enable_ale)
+		bypass_en = 0;
+
+	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
+
+	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
+
+	cpsw_ale_control_set(priv->ale, priv->host_port,
+			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+	cpsw_ale_control_set(priv->ale, 0,
+			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
+			     GBE_PORT_MASK(priv->ale_ports));
+
+	cpsw_ale_control_set(priv->ale, 0,
+			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
+			     GBE_PORT_MASK(priv->ale_ports - 1));
+
+	cpsw_ale_control_set(priv->ale, 0,
+			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+			     GBE_PORT_MASK(priv->ale_ports));
+
+	cpsw_ale_control_set(priv->ale, 0,
+			     ALE_PORT_UNTAGGED_EGRESS,
+			     GBE_PORT_MASK(priv->ale_ports));
+}
+
+static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	u16 vlan_id;
+
+	cpsw_ale_add_mcast(gbe_dev->ale, addr,
+			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
+			   ALE_MCAST_FWD_2);
+	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+		cpsw_ale_add_mcast(gbe_dev->ale, addr,
+				   GBE_PORT_MASK(gbe_dev->ale_ports),
+				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
+	}
+}
+
+static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	u16 vlan_id;
+
+	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
+		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+				   ALE_VLAN, vlan_id);
+}
+
+static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	u16 vlan_id;
+
+	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
+
+	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
+	}
+}
+
+static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	u16 vlan_id;
+
+	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+				   ALE_VLAN, vlan_id);
+	}
+}
+
+static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
+		naddr->addr, naddr->type);
+
+	switch (naddr->type) {
+	case ADDR_MCAST:
+	case ADDR_BCAST:
+		gbe_add_mcast_addr(gbe_intf, naddr->addr);
+		break;
+	case ADDR_UCAST:
+	case ADDR_DEV:
+		gbe_add_ucast_addr(gbe_intf, naddr->addr);
+		break;
+	case ADDR_ANY:
+		/* nothing to do for promiscuous */
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
+		naddr->addr, naddr->type);
+
+	switch (naddr->type) {
+	case ADDR_MCAST:
+	case ADDR_BCAST:
+		gbe_del_mcast_addr(gbe_intf, naddr->addr);
+		break;
+	case ADDR_UCAST:
+	case ADDR_DEV:
+		gbe_del_ucast_addr(gbe_intf, naddr->addr);
+		break;
+	case ADDR_ANY:
+		/* nothing to do for promiscuous */
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gbe_add_vid(void *intf_priv, int vid)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	set_bit(vid, gbe_intf->active_vlans);
+
+	cpsw_ale_add_vlan(gbe_dev->ale, vid,
+			  GBE_PORT_MASK(gbe_dev->ale_ports),
+			  GBE_MASK_NO_PORTS,
+			  GBE_PORT_MASK(gbe_dev->ale_ports),
+			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
+
+	return 0;
+}
+
+static int gbe_del_vid(void *intf_priv, int vid)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
+	clear_bit(vid, gbe_intf->active_vlans);
+	return 0;
+}
+
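+/* CPTS hardware timestamping.  If the attached PHY driver implements
+ * its own txtstamp/rxtstamp methods, the PHY is assumed to handle
+ * timestamping and CPTS leaves the packet alone; otherwise qualifying
+ * PTP packets are timestamped by the CPTS unit in the switch.
+ */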
+#if IS_ENABLED(CONFIG_TI_CPTS)
+#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
+#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
+
+static void gbe_txtstamp(void *context, struct sk_buff *skb)
+{
+	struct gbe_intf *gbe_intf = context;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	cpts_tx_timestamp(gbe_dev->cpts, skb);
+}
+
+static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
+			      const struct netcp_packet *p_info)
+{
+	struct sk_buff *skb = p_info->skb;
+
+	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
+}
+
+static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
+				 struct netcp_packet *p_info)
+{
+	struct phy_device *phydev = p_info->skb->dev->phydev;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+	    !cpts_is_tx_enabled(gbe_dev->cpts))
+		return 0;
+
+	/* If phy has the txtstamp api, assume it will do it.
+	 * We mark it here because skb_tx_timestamp() is called
+	 * after all the txhooks are called.
+	 */
+	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
+		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		return 0;
+	}
+
+	if (gbe_need_txtstamp(gbe_intf, p_info)) {
+		p_info->txtstamp = gbe_txtstamp;
+		p_info->ts_context = (void *)gbe_intf;
+		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
+	}
+
+	return 0;
+}
+
+static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
+{
+	struct phy_device *phydev = p_info->skb->dev->phydev;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	if (p_info->rxtstamp_complete)
+		return 0;
+
+	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
+		p_info->rxtstamp_complete = true;
+		return 0;
+	}
+
+	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+	p_info->rxtstamp_complete = true;
+
+	return 0;
+}
+
+static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	struct cpts *cpts = gbe_dev->cpts;
+	struct hwtstamp_config cfg;
+
+	if (!cpts)
+		return -EOPNOTSUPP;
+
+	cfg.flags = 0;
+	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
+		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
+			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
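+/* Program the per-port time sync control registers.  The sequence-id
+ * offset of 30 corresponds to the position of the sequenceId field
+ * in the PTP message header, and ETH_P_1588 (0x88f7) is the ltype
+ * used to recognise PTP over Layer 2 frames.
+ */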
+static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	struct gbe_slave *slave = gbe_intf->slave;
+	u32 ts_en, seq_id, ctl;
+
+	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
+	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
+		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
+		return;
+	}
+
+	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
+	ctl = ETH_P_1588 | TS_TTL_NONZERO |
+		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
+		(slave->ts_ctl.uni ? TS_UNI_EN :
+			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
+
+	if (cpts_is_tx_enabled(gbe_dev->cpts))
+		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
+
+	if (cpts_is_rx_enabled(gbe_dev->cpts))
+		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
+
+	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
+	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
+	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
+}
+
+static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+{
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	struct cpts *cpts = gbe_dev->cpts;
+	struct hwtstamp_config cfg;
+
+	if (!cpts)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (cfg.flags)
+		return -EINVAL;
+
+	switch (cfg.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		cpts_tx_enable(cpts, 0);
+		break;
+	case HWTSTAMP_TX_ON:
+		cpts_tx_enable(cpts, 1);
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (cfg.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		cpts_rx_enable(cpts, 0);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	gbe_hwtstamp(gbe_intf);
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void gbe_register_cpts(struct gbe_priv *gbe_dev)
+{
+	if (!gbe_dev->cpts)
+		return;
+
+	if (gbe_dev->cpts_registered > 0)
+		goto done;
+
+	if (cpts_register(gbe_dev->cpts)) {
+		dev_err(gbe_dev->dev, "error registering cpts device\n");
+		return;
+	}
+
+done:
+	++gbe_dev->cpts_registered;
+}
+
+static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
+{
+	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
+		return;
+
+	if (--gbe_dev->cpts_registered)
+		return;
+
+	cpts_unregister(gbe_dev->cpts);
+}
+#else
+static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
+					struct netcp_packet *p_info)
+{
+	return 0;
+}
+
+static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
+			       struct netcp_packet *p_info)
+{
+	return 0;
+}
+
+static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
+			       struct ifreq *ifr, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
+{
+}
+
+static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
+{
+}
+
+static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_TI_CPTS */
+
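+/* Promiscuous mode is emulated through the ALE: address learning is
+ * frozen on all ports, learned entries are aged out, multicast
+ * entries are flushed and all unicast traffic is flooded to the host
+ * port.  Leaving promiscuous mode simply turns the unicast flood
+ * back off.
+ */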
+static int gbe_set_rx_mode(void *intf_priv, bool promisc)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	struct cpsw_ale *ale = gbe_dev->ale;
+	unsigned long timeout;
+	int i, ret = -ETIMEDOUT;
+
+	/* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
+	 * slaves are port 1 and up)
+	 */
+	for (i = 0; i <= gbe_dev->num_slaves; i++) {
+		cpsw_ale_control_set(ale, i,
+				     ALE_PORT_NOLEARN, !!promisc);
+		cpsw_ale_control_set(ale, i,
+				     ALE_PORT_NO_SA_UPDATE, !!promisc);
+	}
+
+	if (!promisc) {
+		/* Don't Flood All Unicast Packets to Host port */
+		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+		dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
+		return 0;
+	}
+
+	timeout = jiffies + HZ;
+
+	/* Clear All Untouched entries */
+	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+	do {
+		cpu_relax();
+		if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
+			ret = 0;
+			break;
+		}
+
+	} while (time_after(timeout, jiffies));
+
+	/* Make sure it is not a false timeout */
+	if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+		return ret;
+
+	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+	/* Clear all mcast from ALE */
+	cpsw_ale_flush_multicast(ale,
+				 GBE_PORT_MASK(gbe_dev->ale_ports),
+				 -1);
+
+	/* Flood All Unicast Packets to Host port */
+	cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+	dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
+	return ret;
+}
+
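+/* Hardware timestamping ioctls are handled locally only when the PHY
+ * does not provide its own hwtstamp method; everything else is
+ * passed through to the PHY via phy_mii_ioctl().
+ */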
+static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct phy_device *phy = gbe_intf->slave->phy;
+
+	if (!phy || !phy->drv || !phy->drv->hwtstamp) {
+		switch (cmd) {
+		case SIOCGHWTSTAMP:
+			return gbe_hwtstamp_get(gbe_intf, req);
+		case SIOCSHWTSTAMP:
+			return gbe_hwtstamp_set(gbe_intf, req);
+		}
+	}
+
+	if (phy)
+		return phy_mii_ioctl(phy, req, cmd);
+
+	return -EOPNOTSUPP;
+}
+
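+/* Periodic housekeeping, rearmed every GBE_TIMER_INTERVAL: polls the
+ * link state of all open interfaces and secondary ports, and updates
+ * the hw stats so that the 32-bit hardware counters are accumulated
+ * into the 64-bit totals before they can wrap.
+ */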
+static void netcp_ethss_timer(struct timer_list *t)
+{
+	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
+	struct gbe_intf *gbe_intf;
+	struct gbe_slave *slave;
+
+	/* Check & update SGMII link state of interfaces */
+	for_each_intf(gbe_intf, gbe_dev) {
+		if (!gbe_intf->slave->open)
+			continue;
+		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
+					      gbe_intf->ndev);
+	}
+
+	/* Check & update SGMII link state of secondary ports */
+	for_each_sec_slave(slave, gbe_dev) {
+		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+	}
+
+	/* Timers run in BH context, so there is no need to block BHs */
+	spin_lock(&gbe_dev->hw_stats_lock);
+
+	if (IS_SS_ID_VER_14(gbe_dev))
+		gbe_update_stats_ver14(gbe_dev, NULL);
+	else
+		gbe_update_stats(gbe_dev, NULL);
+
+	spin_unlock(&gbe_dev->hw_stats_lock);
+
+	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
+	add_timer(&gbe_dev->timer);
+}
+
+static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
+{
+	struct gbe_intf *gbe_intf = data;
+
+	p_info->tx_pipe = &gbe_intf->tx_pipe;
+
+	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
+}
+
+static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
+{
+	struct gbe_intf *gbe_intf = data;
+
+	return gbe_rxtstamp(gbe_intf, p_info);
+}
+
+static int gbe_open(void *intf_priv, struct net_device *ndev)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_slave *slave = gbe_intf->slave;
+	int port_num = slave->port_num;
+	u32 reg, val;
+	int ret;
+
+	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
+	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
+		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
+		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
+
+	/* For 10G and on NetCP 1.5, use directed to port */
+	if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
+		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
+
+	if (gbe_dev->enable_ale)
+		gbe_intf->tx_pipe.switch_to_port = 0;
+	else
+		gbe_intf->tx_pipe.switch_to_port = port_num;
+
+	dev_dbg(gbe_dev->dev,
+		"opened TX channel %s: %p with switch-to-port %d, flags %d\n",
+		gbe_intf->tx_pipe.dma_chan_name,
+		gbe_intf->tx_pipe.dma_channel,
+		gbe_intf->tx_pipe.switch_to_port,
+		gbe_intf->tx_pipe.flags);
+
+	gbe_slave_stop(gbe_intf);
+
+	/* disable priority elevation and enable statistics on all ports */
+	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
+
+	/* Control register */
+	val = GBE_CTL_P0_ENABLE;
+	if (IS_SS_ID_MU(gbe_dev)) {
+		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
+		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
+	}
+	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
+
+	/* All statistics enabled and STAT AB visible by default */
+	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
+						    stat_port_en));
+
+	ret = gbe_slave_open(gbe_intf);
+	if (ret)
+		goto fail;
+
+	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
+	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
+
+	slave->open = true;
+	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
+
+	gbe_register_cpts(gbe_dev);
+
+	return 0;
+
+fail:
+	gbe_slave_stop(gbe_intf);
+	return ret;
+}
+
+static int gbe_close(void *intf_priv, struct net_device *ndev)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+	gbe_unregister_cpts(gbe_dev);
+
+	gbe_slave_stop(gbe_intf);
+
+	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
+	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
+
+	gbe_intf->slave->open = false;
+	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static void init_slave_ts_ctl(struct gbe_slave *slave)
+{
+	slave->ts_ctl.uni = 1;
+	slave->ts_ctl.dst_port_map =
+		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
+	slave->ts_ctl.maddr_map =
+		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
+}
+
+#else
+static void init_slave_ts_ctl(struct gbe_slave *slave)
+{
+}
+#endif /* CONFIG_TI_CPTS */
+
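+/* Parse one slave DT node and derive its register windows.  The port
+ * and EMAC register blocks are laid out differently per ethss
+ * flavour: e.g. version 1.4 uses 0x30/0x40 byte blocks (with slaves
+ * 2-3 in a second region), while NU/2U gives each port a full 0x1000
+ * window.
+ */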
+static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+		      struct device_node *node)
+{
+	int port_reg_num;
+	u32 port_reg_ofs, emac_reg_ofs;
+	u32 port_reg_blk_sz, emac_reg_blk_sz;
+
+	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
+		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(node, "link-interface",
+				 &slave->link_interface)) {
+		dev_warn(gbe_dev->dev,
+			 "missing link-interface value defaulting to 1G mac-phy link\n");
+		slave->link_interface = SGMII_LINK_MAC_PHY;
+	}
+
+	slave->node = node;
+	slave->open = false;
+	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
+	    (slave->link_interface == XGMII_LINK_MAC_PHY))
+		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
+	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
+
+	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
+		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
+	else
+		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
+
+	/* EMAC register maps are contiguous but port register maps are not */
+	port_reg_num = slave->slave_num;
+	if (IS_SS_ID_VER_14(gbe_dev)) {
+		if (slave->slave_num > 1) {
+			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
+			port_reg_num -= 2;
+		} else {
+			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
+		}
+		emac_reg_ofs = GBE13_EMAC_OFFSET;
+		port_reg_blk_sz = 0x30;
+		emac_reg_blk_sz = 0x40;
+	} else if (IS_SS_ID_MU(gbe_dev)) {
+		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
+		emac_reg_ofs = GBENU_EMAC_OFFSET;
+		port_reg_blk_sz = 0x1000;
+		emac_reg_blk_sz = 0x1000;
+	} else if (IS_SS_ID_XGBE(gbe_dev)) {
+		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
+		emac_reg_ofs = XGBE10_EMAC_OFFSET;
+		port_reg_blk_sz = 0x30;
+		emac_reg_blk_sz = 0x40;
+	} else {
+		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
+			gbe_dev->ss_version);
+		return -EINVAL;
+	}
+
+	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
+				(port_reg_blk_sz * port_reg_num);
+	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
+				(emac_reg_blk_sz * slave->slave_num);
+
+	if (IS_SS_ID_VER_14(gbe_dev)) {
+		/* Initialize  slave port register offsets */
+		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
+		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
+		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
+		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+		/* Initialize EMAC register offsets */
+		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
+		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+
+	} else if (IS_SS_ID_MU(gbe_dev)) {
+		/* Initialize  slave port register offsets */
+		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
+		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
+		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
+		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
+		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
+		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
+		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
+		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
+		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
+
+		/* Initialize EMAC register offsets */
+		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
+		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
+
+	} else if (IS_SS_ID_XGBE(gbe_dev)) {
+		/* Initialize  slave port register offsets */
+		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
+		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
+		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
+		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+		/* Initialize EMAC register offsets */
+		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
+		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+	}
+
+	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
+
+	init_slave_ts_ctl(slave);
+	return 0;
+}
+
+static void init_secondary_ports(struct gbe_priv *gbe_dev,
+				 struct device_node *node)
+{
+	struct device *dev = gbe_dev->dev;
+	phy_interface_t phy_mode;
+	struct gbe_priv **priv;
+	struct device_node *port;
+	struct gbe_slave *slave;
+	bool mac_phy_link = false;
+
+	for_each_child_of_node(node, port) {
+		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
+		if (!slave) {
+			dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
+				port->name);
+			continue;
+		}
+
+		if (init_slave(gbe_dev, slave, port)) {
+			dev_err(dev,
+				"Failed to initialize secondary port(%s), skipping...\n",
+				port->name);
+			devm_kfree(dev, slave);
+			continue;
+		}
+
+		if (!IS_SS_ID_2U(gbe_dev))
+			gbe_sgmii_config(gbe_dev, slave);
+		gbe_port_reset(slave);
+		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
+		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
+		gbe_dev->num_slaves++;
+		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+		    (slave->link_interface == XGMII_LINK_MAC_PHY))
+			mac_phy_link = true;
+
+		slave->open = true;
+		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+			of_node_put(port);
+			break;
+		}
+	}
+
+	/* of_phy_connect() is needed only for MAC-PHY interface */
+	if (!mac_phy_link)
+		return;
+
+	/* Allocate dummy netdev device for attaching to phy device */
+	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
+					NET_NAME_UNKNOWN, ether_setup);
+	if (!gbe_dev->dummy_ndev) {
+		dev_err(dev,
+			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
+		return;
+	}
+	priv = netdev_priv(gbe_dev->dummy_ndev);
+	*priv = gbe_dev;
+
+	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+		phy_mode = PHY_INTERFACE_MODE_SGMII;
+		slave->phy_port_t = PORT_MII;
+	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
+		phy_mode = PHY_INTERFACE_MODE_RGMII;
+		slave->phy_port_t = PORT_MII;
+	} else {
+		phy_mode = PHY_INTERFACE_MODE_NA;
+		slave->phy_port_t = PORT_FIBRE;
+	}
+
+	for_each_sec_slave(slave, gbe_dev) {
+		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+		    (slave->link_interface != XGMII_LINK_MAC_PHY))
+			continue;
+		slave->phy =
+			of_phy_connect(gbe_dev->dummy_ndev,
+				       slave->phy_node,
+				       gbe_adjust_link_sec_slaves,
+				       0, phy_mode);
+		if (!slave->phy) {
+			dev_err(dev, "phy not found for slave %d\n",
+				slave->slave_num);
+		} else {
+			dev_dbg(dev, "phy found: id is: %s\n",
+				phydev_name(slave->phy));
+			phy_start(slave->phy);
+		}
+	}
+}
+
+static void free_secondary_ports(struct gbe_priv *gbe_dev)
+{
+	struct gbe_slave *slave;
+
+	while (!list_empty(&gbe_dev->secondary_slaves)) {
+		slave = first_sec_slave(gbe_dev);
+
+		if (slave->phy)
+			phy_disconnect(slave->phy);
+		list_del(&slave->slave_list);
+	}
+	if (gbe_dev->dummy_ndev)
+		free_netdev(gbe_dev->dummy_ndev);
+}
+
+static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
+				 struct device_node *node)
+{
+	struct resource res;
+	void __iomem *regs;
+	int ret, i;
+
+	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't xlate xgbe of node(%s) ss address at %d\n",
+			node->name, XGBE_SS_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->ss_regs = regs;
+
+	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't xlate xgbe of node(%s) sm address at %d\n",
+			node->name, XGBE_SM_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->switch_regs = regs;
+
+	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't xlate xgbe serdes of node(%s) address at %d\n",
+			node->name, XGBE_SERDES_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->xgbe_serdes_regs = regs;
+
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = xgbe10_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
+	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
+					 gbe_dev->num_et_stats, sizeof(u64),
+					 GFP_KERNEL);
+	if (!gbe_dev->hw_stats) {
+		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	gbe_dev->hw_stats_prev =
+		devm_kcalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats, sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	gbe_dev->ss_version = XGBE_SS_VERSION_10;
+	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
+					XGBE10_SGMII_MODULE_OFFSET;
+	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
+
+	for (i = 0; i < gbe_dev->max_num_ports; i++)
+		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
+			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
+
+	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
+	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
+	gbe_dev->ale_ports = gbe_dev->max_num_ports;
+	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
+	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
+	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
+
+	/* Subsystem registers */
+	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
+
+	/* Switch module registers */
+	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+	/* Host port registers */
+	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+	return 0;
+}
+
+static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
+				    struct device_node *node)
+{
+	struct resource res;
+	void __iomem *regs;
+	int ret;
+
+	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't translate of node(%s) of gbe ss address at %d\n",
+			node->name, GBE_SS_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->ss_regs = regs;
+	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
+	return 0;
+}
+
+static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
+				struct device_node *node)
+{
+	struct resource res;
+	void __iomem *regs;
+	int i, ret;
+
+	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't translate of gbe node(%s) address at index %d\n",
+			node->name, GBE_SGMII34_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev,
+			"Failed to map gbe sgmii port34 register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->sgmii_port34_regs = regs;
+
+	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't translate of gbe node(%s) address at index %d\n",
+			node->name, GBE_SM_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev,
+			"Failed to map gbe switch module register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->switch_regs = regs;
+
+	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+	gbe_dev->et_stats = gbe13_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
+	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
+					 gbe_dev->num_et_stats, sizeof(u64),
+					 GFP_KERNEL);
+	if (!gbe_dev->hw_stats) {
+		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	gbe_dev->hw_stats_prev =
+		devm_kcalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats, sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
+	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
+
+	/* K2HK has only 2 hw stats modules visible at a time, so
+	 * modules 0 & 2 point to one base and
+	 * modules 1 & 3 point to the other base
+	 */
+	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
+		gbe_dev->hw_stats_regs[i] =
+			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
+			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
+	}
+
+	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
+	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
+	gbe_dev->ale_ports = gbe_dev->max_num_ports;
+	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
+	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
+	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
+
+	/* Subsystem registers */
+	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+
+	/* Switch module registers */
+	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
+	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+	/* Host port registers */
+	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+	return 0;
+}
+
+static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
+				struct device_node *node)
+{
+	struct resource res;
+	void __iomem *regs;
+	int i, ret;
+
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = gbenu_et_stats;
+
+	if (IS_SS_ID_MU(gbe_dev))
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+	else
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+					GBENU_ET_STATS_PORT_SIZE;
+
+	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
+					 gbe_dev->num_et_stats, sizeof(u64),
+					 GFP_KERNEL);
+	if (!gbe_dev->hw_stats) {
+		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	gbe_dev->hw_stats_prev =
+		devm_kcalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats, sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
+	if (ret) {
+		dev_err(gbe_dev->dev,
+			"Can't translate of gbenu node(%s) addr at index %d\n",
+			node->name, GBENU_SM_REG_INDEX);
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(gbe_dev->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(gbe_dev->dev,
+			"Failed to map gbenu switch module register base\n");
+		return PTR_ERR(regs);
+	}
+	gbe_dev->switch_regs = regs;
+
+	if (!IS_SS_ID_2U(gbe_dev))
+		gbe_dev->sgmii_port_regs =
+		       gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+
+	/* Although the sgmii modules are memory mapped into one
+	 * contiguous region on GBENU devices, setting sgmii_port34_regs
+	 * allows consistent code when accessing the sgmii API
+	 */
+	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
+				     (2 * GBENU_SGMII_MODULE_SIZE);
+
+	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
+
+	for (i = 0; i < (gbe_dev->max_num_ports); i++)
+		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
+			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
+
+	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
+	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
+	gbe_dev->ale_ports = gbe_dev->max_num_ports;
+	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
+	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
+
+	/* Subsystem registers */
+	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+	/* ok to set for MU, but used by 2U only */
+	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
+
+	/* Switch module registers */
+	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
+	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+
+	/* Host port registers */
+	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+
+	/* For NU only.  2U does not need tx_pri_map.
+	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
+	 * while 2U has only 1 such thread
+	 */
+	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+	return 0;
+}
+
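+/* Top-level probe: identify the ethss flavour from the compatible
+ * string (and, for "gbe" nodes, from the subsystem version register),
+ * map the register regions, create the ALE and CPTS instances, count
+ * the configured interfaces, bring up any secondary slave ports and
+ * start the link/statistics polling timer.
+ */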
+static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
+		     struct device_node *node, void **inst_priv)
+{
+	struct device_node *interfaces, *interface;
+	struct device_node *secondary_ports;
+	struct cpsw_ale_params ale_params;
+	struct gbe_priv *gbe_dev;
+	u32 slave_num;
+	int i, ret = 0;
+
+	if (!node) {
+		dev_err(dev, "device tree info unavailable\n");
+		return -ENODEV;
+	}
+
+	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
+	if (!gbe_dev)
+		return -ENOMEM;
+
+	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
+	    of_device_is_compatible(node, "ti,netcp-gbe")) {
+		gbe_dev->max_num_slaves = 4;
+	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
+		gbe_dev->max_num_slaves = 8;
+	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
+		gbe_dev->max_num_slaves = 1;
+		gbe_module.set_rx_mode = gbe_set_rx_mode;
+	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
+		gbe_dev->max_num_slaves = 2;
+	} else {
+		dev_err(dev, "device tree node for unknown device\n");
+		return -EINVAL;
+	}
+	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
+
+	gbe_dev->dev = dev;
+	gbe_dev->netcp_device = netcp_device;
+	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
+
+	/* init the hw stats lock */
+	spin_lock_init(&gbe_dev->hw_stats_lock);
+
+	if (of_find_property(node, "enable-ale", NULL)) {
+		gbe_dev->enable_ale = true;
+		dev_info(dev, "ALE enabled\n");
+	} else {
+		gbe_dev->enable_ale = false;
+		dev_dbg(dev, "ALE bypass enabled\n");
+	}
+
+	ret = of_property_read_u32(node, "tx-queue",
+				   &gbe_dev->tx_queue_id);
+	if (ret < 0) {
+		dev_err(dev, "missing tx-queue parameter\n");
+		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
+	}
+
+	ret = of_property_read_string(node, "tx-channel",
+				      &gbe_dev->dma_chan_name);
+	if (ret < 0) {
+		dev_err(dev, "missing \"tx-channel\" parameter\n");
+		return -EINVAL;
+	}
+
+	if (!strcmp(node->name, "gbe")) {
+		ret = get_gbe_resource_version(gbe_dev, node);
+		if (ret)
+			return ret;
+
+		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
+
+		if (IS_SS_ID_VER_14(gbe_dev))
+			ret = set_gbe_ethss14_priv(gbe_dev, node);
+		else if (IS_SS_ID_MU(gbe_dev))
+			ret = set_gbenu_ethss_priv(gbe_dev, node);
+		else
+			ret = -ENODEV;
+
+	} else if (!strcmp(node->name, "xgbe")) {
+		ret = set_xgbe_ethss10_priv(gbe_dev, node);
+		if (ret)
+			return ret;
+		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
+					     gbe_dev->ss_regs);
+	} else {
+		dev_err(dev, "unknown GBE node(%s)\n", node->name);
+		ret = -ENODEV;
+	}
+
+	if (ret)
+		return ret;
+
+	interfaces = of_get_child_by_name(node, "interfaces");
+	if (!interfaces)
+		dev_err(dev, "could not find interfaces\n");
+
+	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
+				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
+	if (ret)
+		return ret;
+
+	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
+	if (ret)
+		return ret;
+
+	/* Create network interfaces */
+	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
+	for_each_child_of_node(interfaces, interface) {
+		ret = of_property_read_u32(interface, "slave-port", &slave_num);
+		if (ret) {
+			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
+				interface->name);
+			continue;
+		}
+		gbe_dev->num_slaves++;
+		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+			of_node_put(interface);
+			break;
+		}
+	}
+	of_node_put(interfaces);
+
+	if (!gbe_dev->num_slaves)
+		dev_warn(dev, "No network interface configured\n");
+
+	/* Initialize Secondary slave ports */
+	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
+	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
+	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
+		init_secondary_ports(gbe_dev, secondary_ports);
+	of_node_put(secondary_ports);
+
+	if (!gbe_dev->num_slaves) {
+		dev_err(dev,
+			"No network interface or secondary ports configured\n");
+		ret = -ENODEV;
+		goto free_sec_ports;
+	}
+
+	memset(&ale_params, 0, sizeof(ale_params));
+	ale_params.dev		= gbe_dev->dev;
+	ale_params.ale_regs	= gbe_dev->ale_reg;
+	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
+	ale_params.ale_entries	= gbe_dev->ale_entries;
+	ale_params.ale_ports	= gbe_dev->ale_ports;
+	if (IS_SS_ID_MU(gbe_dev)) {
+		ale_params.major_ver_mask = 0x7;
+		ale_params.nu_switch_ale = true;
+	}
+	gbe_dev->ale = cpsw_ale_create(&ale_params);
+	if (!gbe_dev->ale) {
+		dev_err(gbe_dev->dev, "error initializing ale engine\n");
+		ret = -ENODEV;
+		goto free_sec_ports;
+	} else {
+		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
+	}
+
+	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
+	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
+		ret = PTR_ERR(gbe_dev->cpts);
+		goto free_sec_ports;
+	}
+
+	/* initialize host port */
+	gbe_init_host_port(gbe_dev);
+
+	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+		if (IS_SS_ID_VER_14(gbe_dev))
+			gbe_reset_mod_stats_ver14(gbe_dev, i);
+		else
+			gbe_reset_mod_stats(gbe_dev, i);
+	}
+	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
+	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
+	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
+	add_timer(&gbe_dev->timer);
+	*inst_priv = gbe_dev;
+	return 0;
+
+free_sec_ports:
+	free_secondary_ports(gbe_dev);
+	return ret;
+}
+
+static int gbe_attach(void *inst_priv, struct net_device *ndev,
+		      struct device_node *node, void **intf_priv)
+{
+	struct gbe_priv *gbe_dev = inst_priv;
+	struct gbe_intf *gbe_intf;
+	int ret;
+
+	if (!node) {
+		dev_err(gbe_dev->dev, "interface node not available\n");
+		return -ENODEV;
+	}
+
+	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
+	if (!gbe_intf)
+		return -ENOMEM;
+
+	gbe_intf->ndev = ndev;
+	gbe_intf->dev = gbe_dev->dev;
+	gbe_intf->gbe_dev = gbe_dev;
+
+	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
+					sizeof(*gbe_intf->slave),
+					GFP_KERNEL);
+	if (!gbe_intf->slave) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
+	ndev->ethtool_ops = &keystone_ethtool_ops;
+	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
+	*intf_priv = gbe_intf;
+	return 0;
+
+fail:
+	if (gbe_intf->slave)
+		devm_kfree(gbe_dev->dev, gbe_intf->slave);
+	if (gbe_intf)
+		devm_kfree(gbe_dev->dev, gbe_intf);
+	return ret;
+}
+
+static int gbe_release(void *intf_priv)
+{
+	struct gbe_intf *gbe_intf = intf_priv;
+
+	gbe_intf->ndev->ethtool_ops = NULL;
+	list_del(&gbe_intf->gbe_intf_list);
+	devm_kfree(gbe_intf->dev, gbe_intf->slave);
+	devm_kfree(gbe_intf->dev, gbe_intf);
+	return 0;
+}
+
+static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
+{
+	struct gbe_priv *gbe_dev = inst_priv;
+
+	del_timer_sync(&gbe_dev->timer);
+	cpts_release(gbe_dev->cpts);
+	cpsw_ale_stop(gbe_dev->ale);
+	netcp_txpipe_close(&gbe_dev->tx_pipe);
+	free_secondary_ports(gbe_dev);
+
+	if (!list_empty(&gbe_dev->gbe_intf_head))
+		dev_alert(gbe_dev->dev,
+			  "unreleased ethss interfaces present\n");
+
+	return 0;
+}
+
+static struct netcp_module gbe_module = {
+	.name		= GBE_MODULE_NAME,
+	.owner		= THIS_MODULE,
+	.primary	= true,
+	.probe		= gbe_probe,
+	.open		= gbe_open,
+	.close		= gbe_close,
+	.remove		= gbe_remove,
+	.attach		= gbe_attach,
+	.release	= gbe_release,
+	.add_addr	= gbe_add_addr,
+	.del_addr	= gbe_del_addr,
+	.add_vid	= gbe_add_vid,
+	.del_vid	= gbe_del_vid,
+	.ioctl		= gbe_ioctl,
+};
+
+static struct netcp_module xgbe_module = {
+	.name		= XGBE_MODULE_NAME,
+	.owner		= THIS_MODULE,
+	.primary	= true,
+	.probe		= gbe_probe,
+	.open		= gbe_open,
+	.close		= gbe_close,
+	.remove		= gbe_remove,
+	.attach		= gbe_attach,
+	.release	= gbe_release,
+	.add_addr	= gbe_add_addr,
+	.del_addr	= gbe_del_addr,
+	.add_vid	= gbe_add_vid,
+	.del_vid	= gbe_del_vid,
+	.ioctl		= gbe_ioctl,
+};
+
+static int __init keystone_gbe_init(void)
+{
+	int ret;
+
+	ret = netcp_register_module(&gbe_module);
+	if (ret)
+		return ret;
+
+	ret = netcp_register_module(&xgbe_module);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+module_init(keystone_gbe_init);
+
+static void __exit keystone_gbe_exit(void)
+{
+	netcp_unregister_module(&gbe_module);
+	netcp_unregister_module(&xgbe_module);
+}
+module_exit(keystone_gbe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
new file mode 100644
index 0000000..5d8419f
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -0,0 +1,157 @@
+/*
+ * SGMII module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:	Sandeep Nair <sandeep_n@ti.com>
+ *		Sandeep Paulraj <s-paulraj@ti.com>
+ *		Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "netcp.h"
+
+#define SGMII_SRESET_RESET		BIT(0)
+#define SGMII_SRESET_RTRESET		BIT(1)
+
+#define SGMII_REG_STATUS_LOCK		BIT(4)
+#define	SGMII_REG_STATUS_LINK		BIT(0)
+#define SGMII_REG_STATUS_AUTONEG	BIT(2)
+#define SGMII_REG_CONTROL_AUTONEG	BIT(0)
+
+#define SGMII23_OFFSET(x)	(((x) - 2) * 0x100)
+#define SGMII_OFFSET(x)		(((x) <= 1) ? ((x) * 0x100) : SGMII23_OFFSET(x))
+
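+/* Each SGMII port owns a 0x100 register window.  Ports 0-1 and 2-3
+ * live in separate hardware modules, so SGMII_OFFSET() folds ports
+ * 2-3 back to 0x000/0x100 relative to the second module's base (the
+ * caller passes the appropriate base, cf. SGMII_BASE() in
+ * netcp_ethss.c): SGMII_OFFSET(0) = SGMII_OFFSET(2) = 0x000 and
+ * SGMII_OFFSET(1) = SGMII_OFFSET(3) = 0x100.
+ */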
+/* SGMII registers */
+#define SGMII_SRESET_REG(x)   (SGMII_OFFSET(x) + 0x004)
+#define SGMII_CTL_REG(x)      (SGMII_OFFSET(x) + 0x010)
+#define SGMII_STATUS_REG(x)   (SGMII_OFFSET(x) + 0x014)
+#define SGMII_MRADV_REG(x)    (SGMII_OFFSET(x) + 0x018)
+
+static void sgmii_write_reg(void __iomem *base, int reg, u32 val)
+{
+	writel(val, base + reg);
+}
+
+static u32 sgmii_read_reg(void __iomem *base, int reg)
+{
+	return readl(base + reg);
+}
+
+static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
+{
+	writel((readl(base + reg) | val), base + reg);
+}
+
+/* port is 0 based */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
+{
+	/* Soft reset */
+	sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+			    SGMII_SRESET_RESET);
+
+	while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+		SGMII_SRESET_RESET) != 0x0)
+		;
+
+	return 0;
+}
+
+/* port is 0 based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+	u32 reg;
+	bool oldval;
+
+	/* Initiate a soft reset */
+	reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+	oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+	if (set)
+		reg |= SGMII_SRESET_RTRESET;
+	else
+		reg &= ~SGMII_SRESET_RTRESET;
+	sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+	wmb();
+
+	return oldval;
+}
+
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
+{
+	u32 status = 0, link = 0;
+
+	status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+	if ((status & SGMII_REG_STATUS_LINK) != 0)
+		link = 1;
+	return link;
+}
+
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
+{
+	unsigned int i, status, mask;
+	u32 mr_adv_ability;
+	u32 control;
+
+	switch (interface) {
+	case SGMII_LINK_MAC_MAC_AUTONEG:
+		mr_adv_ability	= 0x9801;
+		control		= 0x21;
+		break;
+
+	case SGMII_LINK_MAC_PHY:
+	case SGMII_LINK_MAC_PHY_NO_MDIO:
+		mr_adv_ability	= 1;
+		control		= 1;
+		break;
+
+	case SGMII_LINK_MAC_MAC_FORCED:
+		mr_adv_ability	= 0x9801;
+		control		= 0x20;
+		break;
+
+	case SGMII_LINK_MAC_FIBER:
+		mr_adv_ability	= 0x20;
+		control		= 0x1;
+		break;
+
+	default:
+		WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
+		return -EINVAL;
+	}
+
+	sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);
+
+	/* Wait for the SerDes pll to lock */
+	for (i = 0; i < 1000; i++)  {
+		usleep_range(1000, 2000);
+		status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+		if ((status & SGMII_REG_STATUS_LOCK) != 0)
+			break;
+	}
+
+	if ((status & SGMII_REG_STATUS_LOCK) == 0)
+		pr_err("serdes PLL not locked\n");
+
+	sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
+	sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);
+
+	mask = SGMII_REG_STATUS_LINK;
+	if (control & SGMII_REG_CONTROL_AUTONEG)
+		mask |= SGMII_REG_STATUS_AUTONEG;
+
+	for (i = 0; i < 1000; i++)  {
+		usleep_range(200, 500);
+		status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+		if ((status & mask) == mask)
+			break;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
new file mode 100644
index 0000000..33571ac
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -0,0 +1,501 @@
+/*
+ * XGE PCSR module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:	Sandeep Nair <sandeep_n@ti.com>
+ *		WingMan Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "netcp.h"
+
+/* XGBE registers */
+#define XGBE_CTRL_OFFSET		0x0c
+#define XGBE_SGMII_1_OFFSET		0x0114
+#define XGBE_SGMII_2_OFFSET		0x0214
+
+/* PCS-R registers */
+#define PCSR_CPU_CTRL_OFFSET		0x1fd0
+#define POR_EN				BIT(29)
+
+#define reg_rmw(addr, value, mask) \
+	writel(((readl(addr) & (~(mask))) | \
+			(value & (mask))), (addr))
+
+/* bit mask of width w at offset s */
+#define MASK_WID_SH(w, s)		(((1 << (w)) - 1) << (s))
+
+/* shift value v to offset s */
+#define VAL_SH(v, s)			((v) << (s))
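+
+/* Example: reg_rmw(addr, VAL_SH(2, 1), MASK_WID_SH(2, 1)) replaces
+ * the two-bit field at bits [2:1] with the value 2 (binary 10) and
+ * leaves every other bit of the register untouched.
+ */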
+
+#define PHY_A(serdes)			0
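+
+/* PHY_A() is hard-wired to 0 here, so only the Phy-B branches of the
+ * tbus helpers below are ever taken on this hardware.
+ */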
+
+struct serdes_cfg {
+	u32 ofs;
+	u32 val;
+	u32 mask;
+};
+
+static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
+	{0x0000, 0x00800002, 0x00ff00ff},
+	{0x0014, 0x00003838, 0x0000ffff},
+	{0x0060, 0x1c44e438, 0xffffffff},
+	{0x0064, 0x00c18400, 0x00ffffff},
+	{0x0068, 0x17078200, 0xffffff00},
+	{0x006c, 0x00000014, 0x000000ff},
+	{0x0078, 0x0000c000, 0x0000ff00},
+	{0x0000, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
+	{0x0c00, 0x00030002, 0x00ff00ff},
+	{0x0c14, 0x00005252, 0x0000ffff},
+	{0x0c28, 0x80000000, 0xff000000},
+	{0x0c2c, 0x000000f6, 0x000000ff},
+	{0x0c3c, 0x04000405, 0xff00ffff},
+	{0x0c40, 0xc0800000, 0xffff0000},
+	{0x0c44, 0x5a202062, 0xffffffff},
+	{0x0c48, 0x40040424, 0xffffffff},
+	{0x0c4c, 0x00004002, 0x0000ffff},
+	{0x0c50, 0x19001c00, 0xff00ff00},
+	{0x0c54, 0x00002100, 0x0000ff00},
+	{0x0c58, 0x00000060, 0x000000ff},
+	{0x0c60, 0x80131e7c, 0xffffffff},
+	{0x0c64, 0x8400cb02, 0xff00ffff},
+	{0x0c68, 0x17078200, 0xffffff00},
+	{0x0c6c, 0x00000016, 0x000000ff},
+	{0x0c74, 0x00000400, 0x0000ff00},
+	{0x0c78, 0x0000c000, 0x0000ff00},
+	{0x0c00, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
+	{0x0204, 0x00000080, 0x000000ff},
+	{0x0208, 0x0000920d, 0x0000ffff},
+	{0x0204, 0xfc000000, 0xff000000},
+	{0x0208, 0x00009104, 0x0000ffff},
+	{0x0210, 0x1a000000, 0xff000000},
+	{0x0214, 0x00006b58, 0x00ffffff},
+	{0x0218, 0x75800084, 0xffff00ff},
+	{0x022c, 0x00300000, 0x00ff0000},
+	{0x0230, 0x00003800, 0x0000ff00},
+	{0x024c, 0x008f0000, 0x00ff0000},
+	{0x0250, 0x30000000, 0xff000000},
+	{0x0260, 0x00000002, 0x000000ff},
+	{0x0264, 0x00000057, 0x000000ff},
+	{0x0268, 0x00575700, 0x00ffff00},
+	{0x0278, 0xff000000, 0xff000000},
+	{0x0280, 0x00500050, 0x00ff00ff},
+	{0x0284, 0x00001f15, 0x0000ffff},
+	{0x028c, 0x00006f00, 0x0000ff00},
+	{0x0294, 0x00000000, 0xffffff00},
+	{0x0298, 0x00002640, 0xff00ffff},
+	{0x029c, 0x00000003, 0x000000ff},
+	{0x02a4, 0x00000f13, 0x0000ffff},
+	{0x02a8, 0x0001b600, 0x00ffff00},
+	{0x0380, 0x00000030, 0x000000ff},
+	{0x03c0, 0x00000200, 0x0000ff00},
+	{0x03cc, 0x00000018, 0x000000ff},
+	{0x03cc, 0x00000000, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
+	{0x0a00, 0x00000800, 0x0000ff00},
+	{0x0a84, 0x00000000, 0x000000ff},
+	{0x0a8c, 0x00130000, 0x00ff0000},
+	{0x0a90, 0x77a00000, 0xffff0000},
+	{0x0a94, 0x00007777, 0x0000ffff},
+	{0x0b08, 0x000f0000, 0xffff0000},
+	{0x0b0c, 0x000f0000, 0x00ffffff},
+	{0x0b10, 0xbe000000, 0xff000000},
+	{0x0b14, 0x000000ff, 0x000000ff},
+	{0x0b18, 0x00000014, 0x000000ff},
+	{0x0b5c, 0x981b0000, 0xffff0000},
+	{0x0b64, 0x00001100, 0x0000ff00},
+	{0x0b78, 0x00000c00, 0x0000ff00},
+	{0x0abc, 0xff000000, 0xff000000},
+	{0x0ac0, 0x0000008b, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_cm_c1_c2[] = {
+	{0x0208, 0x00000000, 0x00000f00},
+	{0x0208, 0x00000000, 0x0000001f},
+	{0x0204, 0x00000000, 0x00040000},
+	{0x0208, 0x000000a0, 0x000000e0},
+};
+
+static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs)
+{
+	int i;
+
+	/* cmu0 setup */
+	for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) {
+		reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs,
+			cfg_phyb_1p25g_156p25mhz_cmu0[i].val,
+			cfg_phyb_1p25g_156p25mhz_cmu0[i].mask);
+	}
+
+	/* cmu1 setup */
+	for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) {
+		reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs,
+			cfg_phyb_10p3125g_156p25mhz_cmu1[i].val,
+			cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask);
+	}
+}
+
+/* lane is 0 based */
+static void netcp_xgbe_serdes_lane_config(
+			void __iomem *serdes_regs, int lane)
+{
+	int i;
+
+	/* lane setup */
+	for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) {
+		reg_rmw(serdes_regs +
+				cfg_phyb_10p3125g_16bit_lane[i].ofs +
+				(0x200 * lane),
+			cfg_phyb_10p3125g_16bit_lane[i].val,
+			cfg_phyb_10p3125g_16bit_lane[i].mask);
+	}
+
+	/* disable auto-negotiation */
+	reg_rmw(serdes_regs + (0x200 * lane) + 0x0380,
+		0x00000000, 0x00000010);
+
+	/* disable link training */
+	reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0,
+		0x00000000, 0x00000200);
+}
+
+static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) {
+		reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs,
+			cfg_phyb_10p3125g_comlane[i].val,
+			cfg_phyb_10p3125g_comlane[i].mask);
+	}
+}
+
+static void netcp_xgbe_serdes_lane_enable(
+			void __iomem *serdes_regs, int lane)
+{
+	/* Set Lane Control Rate */
+	writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
+}
+
+static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
+{
+	reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
+}
+
+static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
+{
+	writel(0x88000000, serdes_regs + 0x1ff4);
+}
+
+static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
+{
+	netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
+	writel(0xee000000, serdes_regs + 0x1ff4);
+}
+
+static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs)
+{
+	unsigned long timeout;
+	int ret = 0;
+	u32 val_1, val_0;
+
+	timeout = jiffies + msecs_to_jiffies(500);
+	do {
+		val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4));
+		val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4));
+
+		if (val_1 && val_0)
+			return 0;
+
+		if (time_after(jiffies, timeout)) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+		cpu_relax();
+	} while (true);
+
+	pr_err("XGBE serdes not locked: timed out.\n");
+	return ret;
+}
+
+static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
+{
+	writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
+}
+
+static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs)
+{
+	u32 tmp;
+
+	if (PHY_A(serdes_regs)) {
+		tmp  = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff;
+		tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00);
+	} else {
+		tmp  = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff;
+	}
+
+	return tmp;
+}
+
+static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs,
+					      int select, int ofs)
+{
+	if (PHY_A(serdes_regs)) {
+		reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24,
+			~0x00ffffff);
+		return;
+	}
+
+	/* For 2 lane Phy-B, lane0 is actually lane1 */
+	switch (select) {
+	case 1:
+		select = 2;
+		break;
+	case 2:
+		select = 3;
+		break;
+	default:
+		return;
+	}
+
+	reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff);
+}
+
+static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
+					      int select, int ofs)
+{
+	/* Set tbus address */
+	netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
+	/* Get TBUS Value */
+	return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
+}
+
+static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs,
+					void __iomem *sig_detect_reg, int lane)
+{
+	u32 tmp, dlpf, tbus;
+
+	/* Get the DLPF values */
+	tmp = netcp_xgbe_serdes_read_select_tbus(
+			serdes_regs, lane + 1, 5);
+
+	dlpf = tmp >> 2;
+
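+	/* Outside the expected window, pulse the two-bit field at bits
+	 * [2:1] of the signal-detect overlay register (write 2, wait,
+	 * then write 0) to force the CDR to re-acquire.
+	 */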
+	if (dlpf < 400 || dlpf > 700) {
+		reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
+		mdelay(1);
+		reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1));
+	} else {
+		tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane +
+							  1, 0xe);
+
+		pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n",
+			 tmp >> 2, tmp & 3, (tbus >> 2) & 3);
+	}
+}
+
+/* Call every 100 ms */
+static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
+					void __iomem *sw_regs, u32 lanes,
+					u32 *current_state, u32 *lane_down)
+{
+	void __iomem *pcsr_base = sw_regs + 0x0600;
+	void __iomem *sig_detect_reg;
+	u32 pcsr_rx_stat, blk_lock, blk_errs;
+	int loss, i, status = 1;
+
+	for (i = 0; i < lanes; i++) {
+		/* Get the Loss bit */
+		loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;
+
+		/* Get Block Errors and Block Lock bits */
+		pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
+		blk_lock = (pcsr_rx_stat >> 30) & 0x1;
+		blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;
+
+		/* Get Signal Detect Overlay Address */
+		sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;
+
+		/* If Block errors maxed out, attempt recovery! */
+		if (blk_errs == 0x0ff)
+			blk_lock = 0;
+
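+		/* Per-lane link state machine: state 0 waits for signal
+		 * and block lock, state 1 is link-up, and state 2 is a
+		 * suspect state entered when block lock drops; it goes
+		 * back to 1 if the drop was noise, otherwise it resets
+		 * the CDR and falls back to 0.
+		 */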
+		switch (current_state[i]) {
+		case 0:
+			/* if good link lock the signal detect ON! */
+			if (!loss && blk_lock) {
+				pr_debug("XGBE PCSR Linked Lane: %d\n", i);
+				reg_rmw(sig_detect_reg, VAL_SH(3, 1),
+					MASK_WID_SH(2, 1));
+				current_state[i] = 1;
+			} else if (!blk_lock) {
+				/* if no lock, then reset CDR */
+				pr_debug("XGBE PCSR Recover Lane: %d\n", i);
+				netcp_xgbe_serdes_reset_cdr(serdes_regs,
+							    sig_detect_reg, i);
+			}
+			break;
+
+		case 1:
+			if (!blk_lock) {
+				/* Link Lost? */
+				lane_down[i] = 1;
+				current_state[i] = 2;
+			}
+			break;
+
+		case 2:
+			if (blk_lock)
+				/* Nope just noise */
+				current_state[i] = 1;
+			else {
+				/* Lost the block lock, reset CDR if it is
+				 * not centered and go back to sync state
+				 */
+				netcp_xgbe_serdes_reset_cdr(serdes_regs,
+							    sig_detect_reg, i);
+				current_state[i] = 0;
+			}
+			break;
+
+		default:
+			pr_err("XGBE: unknown current_state[%d] %d\n",
+			       i, current_state[i]);
+			break;
+		}
+
+		if (blk_errs > 0) {
+			/* Reset the Error counts! */
+			reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
+				MASK_WID_SH(8, 0));
+
+			reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
+				MASK_WID_SH(8, 0));
+		}
+
+		status &= (current_state[i] == 1);
+	}
+
+	return status;
+}
+
+static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs,
+					void __iomem *sw_regs)
+{
+	u32 current_state[2] = {0, 0};
+	int retries = 0, link_up;
+	u32 lane_down[2];
+
+	do {
+		lane_down[0] = 0;
+		lane_down[1] = 0;
+
+		link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2,
+						       current_state,
+						       lane_down);
+
+		/* if link did not come up, wait 100 ms (below) before
+		 * checking again
+		 */
+		if (link_up)
+			break;
+
+		if (lane_down[0])
+			pr_debug("XGBE: detected link down on lane 0\n");
+
+		if (lane_down[1])
+			pr_debug("XGBE: detected link down on lane 1\n");
+
+		if (++retries > 1) {
+			pr_debug("XGBE: timeout waiting for serdes link up\n");
+			return -ETIMEDOUT;
+		}
+		mdelay(100);
+	} while (!link_up);
+
+	pr_debug("XGBE: PCSR link is up\n");
+	return 0;
+}
+
+static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
+					     int lane, int cm, int c1, int c2)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
+		reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
+			cfg_cm_c1_c2[i].val,
+			cfg_cm_c1_c2[i].mask);
+	}
+}
+
+static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
+{
+	/* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
+	/* enable POR_EN bit */
+	reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
+	usleep_range(10, 100);
+
+	/* disable POR_EN bit */
+	reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
+	usleep_range(10, 100);
+}
+
+static int netcp_xgbe_serdes_config(void __iomem *serdes_regs,
+				    void __iomem *sw_regs)
+{
+	int ret;
+	u32 i;
+
+	netcp_xgbe_serdes_pll_disable(serdes_regs);
+	netcp_xgbe_serdes_cmu_init(serdes_regs);
+
+	for (i = 0; i < 2; i++)
+		netcp_xgbe_serdes_lane_config(serdes_regs, i);
+
+	netcp_xgbe_serdes_com_enable(serdes_regs);
+	/* This is EVM + RTM-BOC specific */
+	for (i = 0; i < 2; i++)
+		netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5);
+
+	netcp_xgbe_serdes_pll_enable(serdes_regs);
+	for (i = 0; i < 2; i++)
+		netcp_xgbe_serdes_lane_enable(serdes_regs, i);
+
+	/* SB PLL Status Poll */
+	ret = netcp_xgbe_wait_pll_locked(sw_regs);
+	if (ret)
+		return ret;
+
+	netcp_xgbe_serdes_enable_xgmii_port(sw_regs);
+	netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs);
+	return ret;
+}
+
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs)
+{
+	u32 val;
+
+	/* read COMLANE bits 4:0 */
+	val = readl(serdes_regs + 0xa00);
+	if (val & 0x1f) {
+		pr_debug("XGBE: serdes already in operation - reset\n");
+		netcp_xgbe_reset_serdes(serdes_regs);
+	}
+	return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs);
+}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
new file mode 100644
index 0000000..93d1428
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -0,0 +1,3285 @@
+/*******************************************************************************
+ *
+ *  Linux ThunderLAN Driver
+ *
+ *  tlan.c
+ *  by James Banks
+ *
+ *  (C) 1997-1998 Caldera, Inc.
+ *  (C) 1998 James Banks
+ *  (C) 1999-2001 Torben Mathiasen
+ *  (C) 2002 Samuel Chessman
+ *
+ *  This software may be used and distributed according to the terms
+ *  of the GNU General Public License, incorporated herein by reference.
+ *
+ *  Useful (if not required) reading:
+ *
+ *		Texas Instruments, ThunderLAN Programmer's Guide,
+ *			TI Literature Number SPWU013A
+ *			available in PDF format from www.ti.com
+ *		Level One, LXT901 and LXT970 Data Sheets
+ *			available in PDF format from www.level1.com
+ *		National Semiconductor, DP83840A Data Sheet
+ *			available in PDF format from www.national.com
+ *		Microchip Technology, 24C01A/02A/04A Data Sheet
+ *			available in PDF format from www.microchip.com
+ *
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+
+#include "tlan.h"
+
+
+/* For removing EISA devices */
+static	struct net_device	*tlan_eisa_devices;
+
+static	int		tlan_devices_installed;
+
+/* Set speed, duplex and aui settings */
+static  int aui[MAX_TLAN_BOARDS];
+static  int duplex[MAX_TLAN_BOARDS];
+static  int speed[MAX_TLAN_BOARDS];
+static  int boards_found;
+module_param_array(aui, int, NULL, 0);
+module_param_array(duplex, int, NULL, 0);
+module_param_array(speed, int, NULL, 0);
+MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
+MODULE_PARM_DESC(duplex,
+		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
+
+MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
+MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
+MODULE_LICENSE("GPL");
+
+/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+static  int		debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
+
+static	const char tlan_signature[] = "TLAN";
+static  const char tlan_banner[] = "ThunderLAN driver v1.17\n";
+static  int tlan_have_pci;
+static  int tlan_have_eisa;
+
+static const char * const media[] = {
+	"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+	"100BaseTx-FD", "100BaseT4", NULL
+};
+
+static struct board {
+	const char	*device_label;
+	u32		flags;
+	u16		addr_ofs;
+} board_info[] = {
+	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+	{ "Compaq Netelligent 10/100 TX PCI UTP",
+	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq NetFlex-3/P",
+	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq Netelligent Integrated 10/100 TX UTP",
+	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
+	  TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq Netelligent 10/100 TX Embedded UTP",
+	  TLAN_ADAPTER_NONE, 0x83 },
+	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
+	{ "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
+	  TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+	{ "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
+	  TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq NetFlex-3/E",
+	  TLAN_ADAPTER_ACTIVITY_LED |	/* EISA card */
+	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+	{ "Compaq NetFlex-3/E",
+	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+};
+
+static const struct pci_device_id tlan_pci_tbl[] = {
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
+
+static void	tlan_eisa_probe(void);
+static void	tlan_eisa_cleanup(void);
+static int      tlan_init(struct net_device *);
+static int	tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int	tlan_close(struct net_device *);
+static struct	net_device_stats *tlan_get_stats(struct net_device *);
+static void	tlan_set_multicast_list(struct net_device *);
+static int	tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int      tlan_probe1(struct pci_dev *pdev, long ioaddr,
+			    int irq, int rev, const struct pci_device_id *ent);
+static void	tlan_tx_timeout(struct net_device *dev);
+static void	tlan_tx_timeout_work(struct work_struct *work);
+static int	tlan_init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent);
+
+static u32	tlan_handle_tx_eof(struct net_device *, u16);
+static u32	tlan_handle_stat_overflow(struct net_device *, u16);
+static u32	tlan_handle_rx_eof(struct net_device *, u16);
+static u32	tlan_handle_dummy(struct net_device *, u16);
+static u32	tlan_handle_tx_eoc(struct net_device *, u16);
+static u32	tlan_handle_status_check(struct net_device *, u16);
+static u32	tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void	tlan_timer(struct timer_list *t);
+static void	tlan_phy_monitor(struct timer_list *t);
+
+static void	tlan_reset_lists(struct net_device *);
+static void	tlan_free_lists(struct net_device *);
+static void	tlan_print_dio(u16);
+static void	tlan_print_list(struct tlan_list *, char *, int);
+static void	tlan_read_and_clear_stats(struct net_device *, int);
+static void	tlan_reset_adapter(struct net_device *);
+static void	tlan_finish_reset(struct net_device *);
+static void	tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void	tlan_phy_print(struct net_device *);
+static void	tlan_phy_detect(struct net_device *);
+static void	tlan_phy_power_down(struct net_device *);
+static void	tlan_phy_power_up(struct net_device *);
+static void	tlan_phy_reset(struct net_device *);
+static void	tlan_phy_start_link(struct net_device *);
+static void	tlan_phy_finish_auto_neg(struct net_device *);
+
+/*
+  static int	tlan_phy_nop(struct net_device *);
+  static int	tlan_phy_internal_check(struct net_device *);
+  static int	tlan_phy_internal_service(struct net_device *);
+  static int	tlan_phy_dp83840a_check(struct net_device *);
+*/
+
+static bool	tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void	tlan_mii_send_data(u16, u32, unsigned);
+static void	tlan_mii_sync(u16);
+static void	tlan_mii_write_reg(struct net_device *, u16, u16, u16);
+
+static void	tlan_ee_send_start(u16);
+static int	tlan_ee_send_byte(u16, u8, int);
+static void	tlan_ee_receive_byte(u16, u8 *, int);
+static int	tlan_ee_read_byte(struct net_device *, u8, u8 *);
+
+
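+/* The sk_buff pointer for a list entry must be recoverable at
+ * completion time, so it is stashed in the otherwise unused buffer
+ * slots 8 and 9 of the descriptor, split into two 32-bit halves so
+ * that a 64-bit pointer survives the round trip.
+ */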
+static inline void
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
+{
+	unsigned long addr = (unsigned long)skb;
+	tag->buffer[9].address = addr;
+	tag->buffer[8].address = upper_32_bits(addr);
+}
+
+static inline struct sk_buff *
+tlan_get_skb(const struct tlan_list *tag)
+{
+	unsigned long addr;
+
+	addr = tag->buffer[9].address;
+	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
+	return (struct sk_buff *) addr;
+}
+
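+/* Interrupt subhandler dispatch table, indexed by the interrupt type
+ * field that tlan_handle_interrupt() extracts from the HOST_INT
+ * register.
+ */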
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
+	NULL,
+	tlan_handle_tx_eof,
+	tlan_handle_stat_overflow,
+	tlan_handle_rx_eof,
+	tlan_handle_dummy,
+	tlan_handle_tx_eoc,
+	tlan_handle_status_check,
+	tlan_handle_rx_eoc
+};
+
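+/* Arm the driver's private timer.  Taking priv->lock is skipped in
+ * hard-IRQ context because tlan_handle_interrupt() already holds the
+ * lock when its subhandlers end up in here.
+ */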
+static inline void
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	unsigned long flags = 0;
+
+	if (!in_irq())
+		spin_lock_irqsave(&priv->lock, flags);
+	if (priv->timer.function != NULL &&
+	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
+		if (!in_irq())
+			spin_unlock_irqrestore(&priv->lock, flags);
+		return;
+	}
+	priv->timer.function = tlan_timer;
+	if (!in_irq())
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+	priv->timer_set_at = jiffies;
+	priv->timer_type = type;
+	mod_timer(&priv->timer, jiffies + ticks);
+
+}
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver primary functions
+
+These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+
+/***************************************************************
+ *	tlan_remove_one
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		pdev	PCI device struct of the adapter
+ *			being removed.
+ *
+ *	Frees the device struct and the memory associated
+ *	with the device (lists and buffers).  It also
+ *	unreserves the IO port regions associated with the
+ *	device.
+ *
+ **************************************************************/
+
+
+static void tlan_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tlan_priv	*priv = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	if (priv->dma_storage) {
+		pci_free_consistent(priv->pci_dev,
+				    priv->dma_size, priv->dma_storage,
+				    priv->dma_storage_dma);
+	}
+
+#ifdef CONFIG_PCI
+	pci_release_regions(pdev);
+#endif
+
+	free_netdev(dev);
+
+	cancel_work_sync(&priv->tlan_tqueue);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+	tlan_reset_lists(dev);
+	/* NOTE: It might not be necessary to read the stats before a
+	 * reset if you don't care what the values are.
+	 */
+	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+	tlan_reset_adapter(dev);
+	netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+
+	del_timer_sync(&priv->media_timer);
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
+	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+	/* Reset and power down phy */
+	tlan_reset_adapter(dev);
+	if (priv->timer.function != NULL) {
+		del_timer_sync(&priv->timer);
+		priv->timer.function = NULL;
+	}
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (netif_running(dev))
+		tlan_stop(dev);
+
+	netif_device_detach(dev);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_wake_from_d3(pdev, false);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	int rc = pci_enable_device(pdev);
+
+	if (rc)
+		return rc;
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	netif_device_attach(dev);
+
+	if (netif_running(dev))
+		tlan_start(dev);
+
+	return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend   NULL
+#define tlan_resume    NULL
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver tlan_driver = {
+	.name		= "tlan",
+	.id_table	= tlan_pci_tbl,
+	.probe		= tlan_init_one,
+	.remove		= tlan_remove_one,
+	.suspend	= tlan_suspend,
+	.resume		= tlan_resume,
+};
+
+static int __init tlan_probe(void)
+{
+	int rc = -ENODEV;
+
+	pr_info("%s", tlan_banner);
+
+	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
+
+	/* Use new-style PCI probing; the kernel now does
+	 * most of this for us.
+	 */
+	rc = pci_register_driver(&tlan_driver);
+
+	if (rc != 0) {
+		pr_err("Could not register pci driver\n");
+		goto err_out_pci_free;
+	}
+
+	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
+	tlan_eisa_probe();
+
+	pr_info("%d device%s installed, PCI: %d  EISA: %d\n",
+		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+		tlan_have_pci, tlan_have_eisa);
+
+	if (tlan_devices_installed == 0) {
+		rc = -ENODEV;
+		goto  err_out_pci_unreg;
+	}
+	return 0;
+
+err_out_pci_unreg:
+	pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+	return rc;
+}
+
+
+static int tlan_init_one(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	return tlan_probe1(pdev, -1, -1, 0, ent);
+}
+
+
+/*
+***************************************************************
+*	tlan_probe1
+*
+*	Returns:
+*		0 on success, error code on error
+*	Parms:
+*		pdev	PCI device struct (NULL for EISA adapters).
+*		ioaddr	IO base address (EISA only).
+*		irq	IRQ line (EISA only).
+*		rev	adapter revision (EISA only).
+*		ent	matching entry in tlan_pci_tbl (PCI only).
+*
+*	The name is lower case to fit in with all the rest of
+*	the netcard_probe names.  This function looks for
+*	another TLan based adapter, setting it up with the
+*	allocated device struct if one is found.
+*	tlan_probe has been ported to the new net API and
+*	now allocates its own device structure. This function
+*	is also used by modules.
+*
+**************************************************************/
+
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
+		       const struct pci_device_id *ent)
+{
+
+	struct net_device  *dev;
+	struct tlan_priv  *priv;
+	u16		   device_id;
+	int		   reg, rc = -ENODEV;
+
+#ifdef CONFIG_PCI
+	if (pdev) {
+		rc = pci_enable_device(pdev);
+		if (rc)
+			return rc;
+
+		rc = pci_request_regions(pdev, tlan_signature);
+		if (rc) {
+			pr_err("Could not reserve IO regions\n");
+			goto err_out;
+		}
+	}
+#endif  /*  CONFIG_PCI  */
+
+	dev = alloc_etherdev(sizeof(struct tlan_priv));
+	if (dev == NULL) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	priv = netdev_priv(dev);
+
+	priv->pci_dev = pdev;
+	priv->dev = dev;
+
+	/* Is this a PCI device? */
+	if (pdev) {
+		u32		   pci_io_base = 0;
+
+		priv->adapter = &board_info[ent->driver_data];
+
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			pr_err("No suitable PCI mapping available\n");
+			goto err_out_free_dev;
+		}
+
+		for (reg = 0; reg <= 5; reg++) {
+			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
+				pci_io_base = pci_resource_start(pdev, reg);
+				TLAN_DBG(TLAN_DEBUG_GNRL,
+					 "IO mapping is available at %x.\n",
+					 pci_io_base);
+				break;
+			}
+		}
+		if (!pci_io_base) {
+			pr_err("No IO mappings available\n");
+			rc = -EIO;
+			goto err_out_free_dev;
+		}
+
+		dev->base_addr = pci_io_base;
+		dev->irq = pdev->irq;
+		priv->adapter_rev = pdev->revision;
+		pci_set_master(pdev);
+		pci_set_drvdata(pdev, dev);
+
+	} else	{     /* EISA card */
+		/* This is a hack. We need to know which board structure
+		 * is suited for this adapter */
+		device_id = inw(ioaddr + EISA_ID2);
+		if (device_id == 0x20F1) {
+			priv->adapter = &board_info[13]; /* NetFlex-3/E */
+			priv->adapter_rev = 23;		/* TLAN 2.3 */
+		} else {
+			priv->adapter = &board_info[14];
+			priv->adapter_rev = 10;		/* TLAN 1.0 */
+		}
+		dev->base_addr = ioaddr;
+		dev->irq = irq;
+	}
+
+	/* Kernel parameters */
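+	/* When set from the kernel command line, dev->mem_start encodes
+	 * the link options: bit 0 = AUI, bits 2:1 = duplex, bits 4:3 =
+	 * speed (an all-ones field means "use the default").
+	 */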
+	if (dev->mem_start) {
+		priv->aui    = dev->mem_start & 0x01;
+		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
+			: (dev->mem_start & 0x06) >> 1;
+		priv->speed  = ((dev->mem_start & 0x18) == 0x18) ? 0
+			: (dev->mem_start & 0x18) >> 3;
+
+		if (priv->speed == 0x1)
+			priv->speed = TLAN_SPEED_10;
+		else if (priv->speed == 0x2)
+			priv->speed = TLAN_SPEED_100;
+
+		debug = priv->debug = dev->mem_end;
+	} else {
+		priv->aui    = aui[boards_found];
+		priv->speed  = speed[boards_found];
+		priv->duplex = duplex[boards_found];
+		priv->debug = debug;
+	}
+
+	/* This will be used when we get an adapter error from
+	 * within our irq handler */
+	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
+
+	spin_lock_init(&priv->lock);
+
+	rc = tlan_init(dev);
+	if (rc) {
+		pr_err("Could not set up device\n");
+		goto err_out_free_dev;
+	}
+
+	rc = register_netdev(dev);
+	if (rc) {
+		pr_err("Could not register device\n");
+		goto err_out_uninit;
+	}
+
+
+	tlan_devices_installed++;
+	boards_found++;
+
+	/* pdev is NULL if this is an EISA device */
+	if (pdev)
+		tlan_have_pci++;
+	else {
+		priv->next_device = tlan_eisa_devices;
+		tlan_eisa_devices = dev;
+		tlan_have_eisa++;
+	}
+
+	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+		    (int)dev->irq,
+		    (int)dev->base_addr,
+		    priv->adapter->device_label,
+		    priv->adapter_rev);
+	return 0;
+
+err_out_uninit:
+	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+			    priv->dma_storage_dma);
+err_out_free_dev:
+	free_netdev(dev);
+err_out_regions:
+#ifdef CONFIG_PCI
+	if (pdev)
+		pci_release_regions(pdev);
+err_out:
+#endif
+	if (pdev)
+		pci_disable_device(pdev);
+	return rc;
+}
+
+
+static void tlan_eisa_cleanup(void)
+{
+	struct net_device *dev;
+	struct tlan_priv *priv;
+
+	while (tlan_have_eisa) {
+		dev = tlan_eisa_devices;
+		priv = netdev_priv(dev);
+		if (priv->dma_storage) {
+			pci_free_consistent(priv->pci_dev, priv->dma_size,
+					    priv->dma_storage,
+					    priv->dma_storage_dma);
+		}
+		release_region(dev->base_addr, 0x10);
+		unregister_netdev(dev);
+		tlan_eisa_devices = priv->next_device;
+		free_netdev(dev);
+		tlan_have_eisa--;
+	}
+}
+
+
+static void __exit tlan_exit(void)
+{
+	pci_unregister_driver(&tlan_driver);
+
+	if (tlan_have_eisa)
+		tlan_eisa_cleanup();
+
+}
+
+
+/* Module loading/unloading */
+module_init(tlan_probe);
+module_exit(tlan_exit);
+
+
+
+/**************************************************************
+ *	tlan_eisa_probe
+ *
+ *	Returns: Nothing
+ *
+ *	Parms:	 None
+ *
+ *	This function probes for EISA devices and calls
+ *	tlan_probe1 when one is found.
+ *
+ *************************************************************/
+
+static void  __init tlan_eisa_probe(void)
+{
+	long	ioaddr;
+	int	rc = -ENODEV;
+	int	irq;
+	u16	device_id;
+
+	if (!EISA_bus) {
+		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
+		return;
+	}
+
+	/* Loop through all slots of the EISA bus */
+	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+
+		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
+
+
+		TLAN_DBG(TLAN_DEBUG_PROBE,
+			 "Probing for EISA adapter at IO: 0x%4x : ",
+			 (int) ioaddr);
+		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
+			goto out;
+
+		if (inw(ioaddr + EISA_ID) != 0x110E) {
+			release_region(ioaddr, 0x10);
+			goto out;
+		}
+
+		device_id = inw(ioaddr + EISA_ID2);
+		if (device_id != 0x20F1 && device_id != 0x40F1) {
+			release_region(ioaddr, 0x10);
+			goto out;
+		}
+
+		/* check if adapter is enabled */
+		if (inb(ioaddr + EISA_CR) != 0x1) {
+			release_region(ioaddr, 0x10);
+			goto out2;
+		}
+
+		if (debug == 0x10)
+			pr_info("Found one\n");
+
+
+		/* Get irq from board */
+		switch (inb(ioaddr + 0xcc0)) {
+		case 0x10:
+			irq = 5;
+			break;
+		case 0x20:
+			irq = 9;
+			break;
+		case 0x40:
+			irq = 10;
+			break;
+		case 0x80:
+			irq = 11;
+			break;
+		default:
+			goto out;
+		}
+
+
+		/* Set up the newly found EISA adapter */
+		rc = tlan_probe1(NULL, ioaddr, irq, 12, NULL);
+		continue;
+
+out:
+		if (debug == 0x10)
+			pr_info("None found\n");
+		continue;
+
+out2:
+		if (debug == 0x10)
+			pr_info("Card found but it is not enabled, skipping\n");
+		continue;
+
+	}
+
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tlan_poll(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	tlan_handle_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static const struct net_device_ops tlan_netdev_ops = {
+	.ndo_open		= tlan_open,
+	.ndo_stop		= tlan_close,
+	.ndo_start_xmit		= tlan_start_tx,
+	.ndo_tx_timeout		= tlan_tx_timeout,
+	.ndo_get_stats		= tlan_get_stats,
+	.ndo_set_rx_mode	= tlan_set_multicast_list,
+	.ndo_do_ioctl		= tlan_ioctl,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	 = tlan_poll,
+#endif
+};
+
+static void tlan_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	if (priv->pci_dev)
+		strlcpy(info->bus_info, pci_name(priv->pci_dev),
+			sizeof(info->bus_info));
+	else
+		strlcpy(info->bus_info, "EISA",	sizeof(info->bus_info));
+}
+
+static int tlan_get_eeprom_len(struct net_device *dev)
+{
+	return TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom(struct net_device *dev,
+			   struct ethtool_eeprom *eeprom, u8 *data)
+{
+	int i;
+
+	for (i = 0; i < TLAN_EEPROM_SIZE; i++)
+		if (tlan_ee_read_byte(dev, i, &data[i]))
+			return -EIO;
+
+	return 0;
+}
+
+static const struct ethtool_ops tlan_ethtool_ops = {
+	.get_drvinfo	= tlan_get_drvinfo,
+	.get_link	= ethtool_op_get_link,
+	.get_eeprom_len	= tlan_get_eeprom_len,
+	.get_eeprom	= tlan_get_eeprom,
+};
+
+/***************************************************************
+ *	tlan_init
+ *
+ *	Returns:
+ *		0 on success, error code otherwise.
+ *	Parms:
+ *		dev	The structure of the device to be
+ *			init'ed.
+ *
+ *	This function completes the initialization of the
+ *	device structure and driver.  It reserves the IO
+ *	addresses, allocates memory for the lists and bounce
+ *	buffers, retrieves the MAC address from the eeprom
+ *	and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
+{
+	int		dma_size;
+	int		err;
+	int		i;
+	struct tlan_priv	*priv;
+
+	priv = netdev_priv(dev);
+
+	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+		* (sizeof(struct tlan_list));
+	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+						 dma_size,
+						 &priv->dma_storage_dma);
+	priv->dma_size = dma_size;
+
+	if (priv->dma_storage == NULL) {
+		pr_err("Could not allocate lists and buffers for %s\n",
+		       dev->name);
+		return -ENOMEM;
+	}
+	memset(priv->dma_storage, 0, dma_size);
+	priv->rx_list = (struct tlan_list *)
+		ALIGN((unsigned long)priv->dma_storage, 8);
+	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+	priv->tx_list_dma =
+		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
+
+	err = 0;
+	for (i = 0; i < ETH_ALEN; i++)
+		err |= tlan_ee_read_byte(dev,
+					 (u8) priv->adapter->addr_ofs + i,
+					 (u8 *) &dev->dev_addr[i]);
+	if (err) {
+		pr_err("%s: Error reading MAC from eeprom: %d\n",
+		       dev->name, err);
+	}
+	/* Olicom OC-2325/OC-2326 have the address byte-swapped */
+	if (priv->adapter->addr_ofs == 0xf8) {
+		for (i = 0; i < ETH_ALEN; i += 2) {
+			char tmp = dev->dev_addr[i];
+			dev->dev_addr[i] = dev->dev_addr[i + 1];
+			dev->dev_addr[i + 1] = tmp;
+		}
+	}
+
+	netif_carrier_off(dev);
+
+	/* Device methods */
+	dev->netdev_ops = &tlan_netdev_ops;
+	dev->ethtool_ops = &tlan_ethtool_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	return 0;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_open
+ *
+ *	Returns:
+ *		0 on success, error code otherwise.
+ *	Parms:
+ *		dev	Structure of device to be opened.
+ *
+ *	This routine puts the driver and TLAN adapter in a
+ *	state where it is ready to send and receive packets.
+ *	It allocates the IRQ, resets and brings the adapter
+ *	out of reset, and allows interrupts.  It also delays
+ *	the startup for autonegotiation or sends a Rx GO
+ *	command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+static int tlan_open(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	int		err;
+
+	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+			  dev->name, dev);
+
+	if (err) {
+		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+			   dev->irq);
+		return err;
+	}
+
+	timer_setup(&priv->timer, NULL, 0);
+	timer_setup(&priv->media_timer, tlan_phy_monitor, 0);
+
+	tlan_start(dev);
+
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
+		 dev->name, priv->tlan_rev);
+
+	return 0;
+
+}
+
+
+
+/**************************************************************
+ *	tlan_ioctl
+ *
+ *	Returns:
+ *		0 on success, error code otherwise
+ *	Params:
+ *		dev	structure of device to receive ioctl.
+ *
+ *		rq	ifreq structure to hold userspace data.
+ *
+ *		cmd	ioctl command.
+ *
+ *
+ *************************************************************/
+
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(rq);
+	u32 phy   = priv->phy[priv->phy_num];
+
+	if (!priv->phy_online)
+		return -EAGAIN;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:		/* get address of MII PHY in use. */
+		data->phy_id = phy;
+		/* fall through */
+
+
+	case SIOCGMIIREG:		/* read MII PHY register. */
+		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+				  data->reg_num & 0x1f, &data->val_out);
+		return 0;
+
+
+	case SIOCSMIIREG:		/* write MII PHY register. */
+		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+				   data->reg_num & 0x1f, data->val_in);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+
+/***************************************************************
+ *	tlan_tx_timeout
+ *
+ *	Returns: nothing
+ *
+ *	Params:
+ *		dev	structure of device which timed out
+ *			during transmit.
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout(struct net_device *dev)
+{
+
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+
+	/* Ok, so we timed out; let's see what we can do about it... */
+	tlan_free_lists(dev);
+	tlan_reset_lists(dev);
+	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+	tlan_reset_adapter(dev);
+	netif_trans_update(dev); /* prevent tx timeout */
+	netif_wake_queue(dev);
+
+}
+
+
+/***************************************************************
+ *	tlan_tx_timeout_work
+ *
+ *	Returns: nothing
+ *
+ *	Params:
+ *		work	work item of device which timed out
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout_work(struct work_struct *work)
+{
+	struct tlan_priv	*priv =
+		container_of(work, struct tlan_priv, tlan_tqueue);
+
+	tlan_tx_timeout(priv->dev);
+}
+
+
+
+/***************************************************************
+ *	tlan_start_tx
+ *
+ *	Returns:
+ *		0 on success, non-zero on failure.
+ *	Parms:
+ *		skb	A pointer to the sk_buff containing the
+ *			frame to be sent.
+ *		dev	The device to send the data on.
+ *
+ *	This function adds a frame to the Tx list to be sent
+ *	ASAP.  First it verifies that the adapter is ready and
+ *	there is room in the queue.  Then it sets up the next
+ *	available list and copies the frame to the corresponding
+ *	buffer.  If the adapter Tx channel is idle, it gives
+ *	the adapter a Tx Go command on the list, otherwise it
+ *	sets the forward address of the previous list to point
+ *	to this one.  Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	dma_addr_t	tail_list_phys;
+	struct tlan_list	*tail_list;
+	unsigned long	flags;
+	unsigned int    txlen;
+
+	if (!priv->phy_online) {
+		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
+			 dev->name);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
+		return NETDEV_TX_OK;
+	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
+
+	tail_list = priv->tx_list + priv->tx_tail;
+	tail_list_phys =
+		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
+
+	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
+			 dev->name, priv->tx_head, priv->tx_tail);
+		netif_stop_queue(dev);
+		priv->tx_busy_count++;
+		return NETDEV_TX_BUSY;
+	}
+
+	tail_list->forward = 0;
+
+	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
+						      skb->data, txlen,
+						      PCI_DMA_TODEVICE);
+	tlan_store_skb(tail_list, skb);
+
+	tail_list->frame_size = (u16) txlen;
+	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
+	tail_list->buffer[1].count = 0;
+	tail_list->buffer[1].address = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	tail_list->c_stat = TLAN_CSTAT_READY;
+	if (!priv->tx_in_progress) {
+		priv->tx_in_progress = 1;
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  Starting TX on buffer %d\n",
+			 priv->tx_tail);
+		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
+	} else {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  Adding buffer %d to TX channel\n",
+			 priv->tx_tail);
+		if (priv->tx_tail == 0) {
+			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
+				= tail_list_phys;
+		} else {
+			(priv->tx_list + (priv->tx_tail - 1))->forward
+				= tail_list_phys;
+		}
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
+
+	return NETDEV_TX_OK;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_interrupt
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		irq	The line on which the interrupt
+ *			occurred.
+ *		dev_id	A pointer to the device assigned to
+ *			this irq line.
+ *
+ *	This function handles an interrupt generated by its
+ *	assigned TLAN adapter.  The function deactivates
+ *	interrupts on its adapter, records the type of
+ *	interrupt, executes the appropriate subhandler, and
+ *	acknowledges the interrupt to the adapter (thus
+ *	re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
+{
+	struct net_device	*dev = dev_id;
+	struct tlan_priv *priv = netdev_priv(dev);
+	u16		host_int;
+	u16		type;
+
+	spin_lock(&priv->lock);
+
+	host_int = inw(dev->base_addr + TLAN_HOST_INT);
+	type = (host_int & TLAN_HI_IT_MASK) >> 2;
+	if (type) {
+		u32	ack;
+		u32	host_cmd;
+
+		outw(host_int, dev->base_addr + TLAN_HOST_INT);
+		ack = tlan_int_vector[type](dev, host_int);
+
+		if (ack) {
+			host_cmd = TLAN_HC_ACK | ack | (type << 18);
+			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
+		}
+	}
+
+	spin_unlock(&priv->lock);
+
+	return IRQ_RETVAL(type);
+}
+
+
+
+
+/***************************************************************
+ *	tlan_close
+ *
+ *	Returns:
+ *		An error code.
+ *	Parms:
+ *		dev	The device structure of the device to
+ *			close.
+ *
+ *	This function shuts down the adapter.  It records any
+ *	stats, puts the adapter into reset state, deactivates
+ *	its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
+
+static int tlan_close(struct net_device *dev)
+{
+	tlan_stop(dev);
+
+	free_irq(dev->irq, dev);
+	tlan_free_lists(dev);
+	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
+
+	return 0;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_get_stats
+ *
+ *	Returns:
+ *		A pointer to the device's statistics structure.
+ *	Parms:
+ *		dev	The device structure to return the
+ *			stats for.
+ *
+ *	This function updates the devices statistics by reading
+ *	the TLAN chip's onboard registers.  Then it returns the
+ *	address of the statistics structure.
+ *
+ **************************************************************/
+
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	int i;
+
+	/* Should we only read the stats when the device is open? */
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
+		 priv->rx_eoc_count);
+	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
+		 priv->tx_busy_count);
+	if (debug & TLAN_DEBUG_GNRL) {
+		tlan_print_dio(dev->base_addr);
+		tlan_phy_print(dev);
+	}
+	if (debug & TLAN_DEBUG_LIST) {
+		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+			tlan_print_list(priv->rx_list + i, "RX", i);
+		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+			tlan_print_list(priv->tx_list + i, "TX", i);
+	}
+
+	return &dev->stats;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_set_multicast_list
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	The device structure to set the
+ *			multicast list for.
+ *
+ *	This function sets the TLAN adaptor to various receive
+ *	modes.  If the IFF_PROMISC flag is set, promiscuous
+ *	mode is activated.  Otherwise, promiscuous mode is
+ *	turned off.  If the IFF_ALLMULTI flag is set, then
+ *	the hash table is set to receive all group addresses.
+ *	Otherwise, the first three multicast addresses are
+ *	stored in AREG_1-3, and the rest are selected via the
+ *	hash table, as necessary.
+ *
+ **************************************************************/
+
+static void tlan_set_multicast_list(struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	u32			hash1 = 0;
+	u32			hash2 = 0;
+	int			i;
+	u32			offset;
+	u8			tmp;
+
+	if (dev->flags & IFF_PROMISC) {
+		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+		tlan_dio_write8(dev->base_addr,
+				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
+	} else {
+		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+		tlan_dio_write8(dev->base_addr,
+				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+		if (dev->flags & IFF_ALLMULTI) {
+			for (i = 0; i < 3; i++)
+				tlan_set_mac(dev, i + 1, NULL);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+					 0xffffffff);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+					 0xffffffff);
+		} else {
+			i = 0;
+			netdev_for_each_mc_addr(ha, dev) {
+				if (i < 3) {
+					tlan_set_mac(dev, i + 1,
+						     (char *) &ha->addr);
+				} else {
+					offset =
+						tlan_hash_func((u8 *)&ha->addr);
+					if (offset < 32)
+						hash1 |= (1 << offset);
+					else
+						hash2 |= (1 << (offset - 32));
+				}
+				i++;
+			}
+			for ( ; i < 3; i++)
+				tlan_set_mac(dev, i + 1, NULL);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
+		}
+	}
+
+}
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver interrupt vectors and table
+
+Please see chapter 4, "Interrupt Handling", of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN-based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+/***************************************************************
+ *	tlan_handle_tx_eof
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles Tx EOF interrupts which are raised
+ *	by the adapter when it has completed sending the
+ *	contents of a buffer.  It determines which list/buffer
+ *	was completed and resets it.  If the buffer was the last
+ *	in the channel (EOC), then the function checks to see if
+ *	another buffer is ready to send, and if so, sends a Tx
+ *	Go command.  Finally, the driver activates/continues the
+ *	activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	int		eoc = 0;
+	struct tlan_list	*head_list;
+	dma_addr_t	head_list_phys;
+	u32		ack = 0;
+	u16		tmp_c_stat;
+
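+	/* Each completed list counts once in "ack"; the total is handed
+	 * back to tlan_handle_interrupt(), which folds it into the
+	 * TLAN_HC_ACK host command, apparently letting the adapter
+	 * retire that many pending interrupts at once.
+	 */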
+	TLAN_DBG(TLAN_DEBUG_TX,
+		 "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
+		 priv->tx_head, priv->tx_tail);
+	head_list = priv->tx_list + priv->tx_head;
+
+	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+	       && (ack < 255)) {
+		struct sk_buff *skb = tlan_get_skb(head_list);
+
+		ack++;
+		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
+				 max(skb->len,
+				     (unsigned int)TLAN_MIN_FRAME_SIZE),
+				 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(skb);
+		head_list->buffer[8].address = 0;
+		head_list->buffer[9].address = 0;
+
+		if (tmp_c_stat & TLAN_CSTAT_EOC)
+			eoc = 1;
+
+		dev->stats.tx_bytes += head_list->frame_size;
+
+		head_list->c_stat = TLAN_CSTAT_UNUSED;
+		netif_start_queue(dev);
+		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+		head_list = priv->tx_list + priv->tx_head;
+	}
+
+	if (!ack)
+		netdev_info(dev,
+			    "Received interrupt for uncompleted TX frame\n");
+
+	if (eoc) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  handling TX EOC (Head=%d Tail=%d)\n",
+			 priv->tx_head, priv->tx_tail);
+		head_list = priv->tx_list + priv->tx_head;
+		head_list_phys = priv->tx_list_dma
+			+ sizeof(struct tlan_list)*priv->tx_head;
+		if ((head_list->c_stat & TLAN_CSTAT_READY)
+		    == TLAN_CSTAT_READY) {
+			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+			ack |= TLAN_HC_GO;
+		} else {
+			priv->tx_in_progress = 0;
+		}
+	}
+
+	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+		tlan_dio_write8(dev->base_addr,
+				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+		if (priv->timer.function == NULL) {
+			priv->timer.function = tlan_timer;
+			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+			priv->timer_set_at = jiffies;
+			priv->timer_type = TLAN_TIMER_ACTIVITY;
+			add_timer(&priv->timer);
+		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+			priv->timer_set_at = jiffies;
+		}
+	}
+
+	return ack;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_stat_overflow
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles the Statistics Overflow interrupt
+ *	which means that one or more of the TLAN statistics
+ *	registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
+{
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+	return 1;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_rx_eof
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles the Rx EOF interrupt which
+ *	indicates a frame has been received by the adapter from
+ *	the net and the frame has been transferred to memory.
+ *	The function determines which receive buffer the frame
+ *	arrived in, hands the attached sk_buff to the protocol
+ *	stack, and maps a freshly allocated sk_buff into the list
+ *	entry before appending it to the end
+ *	of the list.  If the frame was the last in the Rx
+ *	channel (EOC), the function restarts the receive channel
+ *	by sending an Rx Go command to the adapter.  Then it
+ *	activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u32		ack = 0;
+	int		eoc = 0;
+	struct tlan_list	*head_list;
+	struct sk_buff	*skb;
+	struct tlan_list	*tail_list;
+	u16		tmp_c_stat;
+	dma_addr_t	head_list_phys;
+
+	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  handling RX EOF (Head=%d Tail=%d)\n",
+		 priv->rx_head, priv->rx_tail);
+	head_list = priv->rx_list + priv->rx_head;
+	head_list_phys =
+		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
+
+	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+	       && (ack < 255)) {
+		dma_addr_t frame_dma = head_list->buffer[0].address;
+		u32 frame_size = head_list->frame_size;
+		struct sk_buff *new_skb;
+
+		ack++;
+		if (tmp_c_stat & TLAN_CSTAT_EOC)
+			eoc = 1;
+
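+		/* If no replacement skb can be allocated, drop the frame
+		 * and recycle the existing buffer back onto the RX ring.
+		 */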
+		new_skb = netdev_alloc_skb_ip_align(dev,
+						    TLAN_MAX_FRAME_SIZE + 5);
+		if (!new_skb)
+			goto drop_and_reuse;
+
+		skb = tlan_get_skb(head_list);
+		pci_unmap_single(priv->pci_dev, frame_dma,
+				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+		skb_put(skb, frame_size);
+
+		dev->stats.rx_bytes += frame_size;
+
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_rx(skb);
+
+		head_list->buffer[0].address =
+			pci_map_single(priv->pci_dev, new_skb->data,
+				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+
+		tlan_store_skb(head_list, new_skb);
+drop_and_reuse:
+		head_list->forward = 0;
+		head_list->c_stat = 0;
+		tail_list = priv->rx_list + priv->rx_tail;
+		tail_list->forward = head_list_phys;
+
+		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+		head_list = priv->rx_list + priv->rx_head;
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+	}
+
+	if (!ack)
+		netdev_info(dev,
+			    "Received interrupt for uncompleted RX frame\n");
+
+
+	if (eoc) {
+		TLAN_DBG(TLAN_DEBUG_RX,
+			 "RECEIVE:  handling RX EOC (Head=%d Tail=%d)\n",
+			 priv->rx_head, priv->rx_tail);
+		head_list = priv->rx_list + priv->rx_head;
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+		ack |= TLAN_HC_GO | TLAN_HC_RT;
+		priv->rx_eoc_count++;
+	}
+
+	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+		tlan_dio_write8(dev->base_addr,
+				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+		if (priv->timer.function == NULL)  {
+			priv->timer.function = tlan_timer;
+			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+			priv->timer_set_at = jiffies;
+			priv->timer_type = TLAN_TIMER_ACTIVITY;
+			add_timer(&priv->timer);
+		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+			priv->timer_set_at = jiffies;
+		}
+	}
+
+	return ack;
+
+}
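+
+/* The ring bookkeeping above relies on the CIRC_INC macro defined
+ * earlier in this file.  As a sketch (the exact definition may differ),
+ * it advances an index around a fixed-size ring:
+ *
+ *	#define CIRC_INC(a, b)	if (++(a) >= (b)) (a) = 0
+ *
+ * so once a frame is handed to the stack, its list element is relinked
+ * behind the current tail and both head and tail advance by one.
+ */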
+
+
+
+
+/***************************************************************
+ *	tlan_handle_dummy
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles the Dummy interrupt, which is
+ *	raised whenever a test interrupt is generated by setting
+ *	the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
+{
+	netdev_info(dev, "Test interrupt\n");
+	return 1;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_tx_eoc
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This driver is structured to determine EOC occurrences by
+ *	reading the CSTAT member of the list structure.  Tx EOC
+ *	interrupts are disabled via the DIO INTDIS register.
+ *	However, TLAN chips before revision 3.0 didn't have this
+ *	functionality, so EOC events are processed here when the
+ *	chip is pre-3.0.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	struct tlan_list		*head_list;
+	dma_addr_t		head_list_phys;
+	u32			ack = 1;
+
+	if (priv->tlan_rev < 0x30) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+			 priv->tx_head, priv->tx_tail);
+		head_list = priv->tx_list + priv->tx_head;
+		head_list_phys = priv->tx_list_dma
+			+ sizeof(struct tlan_list)*priv->tx_head;
+		if ((head_list->c_stat & TLAN_CSTAT_READY)
+		    == TLAN_CSTAT_READY) {
+			netif_stop_queue(dev);
+			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+			ack |= TLAN_HC_GO;
+		} else {
+			priv->tx_in_progress = 0;
+		}
+	}
+
+	return ack;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_status_check
+ *
+ *	Returns:
+ *		0 if Adapter check, 1 if Network Status check.
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles Adapter Check/Network Status
+ *	interrupts generated by the adapter.  It checks the
+ *	vector in the HOST_INT register to determine if it is
+ *	an Adapter Check interrupt.  If so, it resets the
+ *	adapter.  Otherwise it clears the status registers
+ *	and services the PHY.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u32		ack;
+	u32		error;
+	u8		net_sts;
+	u32		phy;
+	u16		tlphy_ctl;
+	u16		tlphy_sts;
+
+	ack = 1;
+	if (host_int & TLAN_HI_IV_MASK) {
+		netif_stop_queue(dev);
+		error = inl(dev->base_addr + TLAN_CH_PARM);
+		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+		tlan_read_and_clear_stats(dev, TLAN_RECORD);
+		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+
+		schedule_work(&priv->tlan_tqueue);
+
+		netif_wake_queue(dev);
+		ack = 0;
+	} else {
+		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+		phy = priv->phy[priv->phy_num];
+
+		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+		if (net_sts) {
+			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+			TLAN_DBG(TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
+				 dev->name, (unsigned) net_sts);
+		}
+		if ((net_sts & TLAN_NET_STS_MIRQ) &&  (priv->phy_num == 0)) {
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+			if (!(tlphy_sts & TLAN_TS_POLOK) &&
+			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+				tlphy_ctl |= TLAN_TC_SWAPOL;
+				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+						   tlphy_ctl);
+			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
+				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
+				tlphy_ctl &= ~TLAN_TC_SWAPOL;
+				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+						   tlphy_ctl);
+			}
+
+			if (debug)
+				tlan_phy_print(dev);
+		}
+	}
+
+	return ack;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_rx_eoc
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This driver is structured to determine EOC occurrences by
+ *	reading the CSTAT member of the list structure.  Rx EOC
+ *	interrupts are disabled via the DIO INTDIS register.
+ *	However, TLAN chips before revision 3.0 didn't have this
+ *	CSTAT member or an INTDIS register, so if this chip is
+ *	pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	dma_addr_t	head_list_phys;
+	u32		ack = 1;
+
+	if (priv->tlan_rev < 0x30) {
+		TLAN_DBG(TLAN_DEBUG_RX,
+			 "RECEIVE:  Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+			 priv->rx_head, priv->rx_tail);
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+		ack |= TLAN_HC_GO | TLAN_HC_RT;
+		priv->rx_eoc_count++;
+	}
+
+	return ack;
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver timer function
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ *	tlan_timer
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		t	Pointer to the struct timer_list that
+ *			fired, embedded in the private structure.
+ *
+ *	This function handles timed functionality for the
+ *	TLAN driver.  The two current timer uses are for
+ *	delaying for autonegotiation and driving the ACT LED.
+ *	-	Autonegotiation requires being allowed about
+ *		2 1/2 seconds before attempting to transmit a
+ *		packet.  It would be a very bad thing to hang
+ *		the kernel this long, so the driver doesn't
+ *		allow transmission until after this time, for
+ *		certain PHYs.  It would be much nicer if all
+ *		PHYs were interrupt-capable like the internal
+ *		PHY.
+ *	-	The ACT LED, which shows adapter activity, is
+ *		driven by the driver, and so must be left on
+ *		for a short period to power up the LED so it
+ *		can be seen.  This delay can be changed by
+ *		changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ *		if desired.  100 ms produces a slightly
+ *		sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(struct timer_list *t)
+{
+	struct tlan_priv	*priv = from_timer(priv, t, timer);
+	struct net_device	*dev = priv->dev;
+	u32		elapsed;
+	unsigned long	flags = 0;
+
+	priv->timer.function = NULL;
+
+	switch (priv->timer_type) {
+	case TLAN_TIMER_PHY_PDOWN:
+		tlan_phy_power_down(dev);
+		break;
+	case TLAN_TIMER_PHY_PUP:
+		tlan_phy_power_up(dev);
+		break;
+	case TLAN_TIMER_PHY_RESET:
+		tlan_phy_reset(dev);
+		break;
+	case TLAN_TIMER_PHY_START_LINK:
+		tlan_phy_start_link(dev);
+		break;
+	case TLAN_TIMER_PHY_FINISH_AN:
+		tlan_phy_finish_auto_neg(dev);
+		break;
+	case TLAN_TIMER_FINISH_RESET:
+		tlan_finish_reset(dev);
+		break;
+	case TLAN_TIMER_ACTIVITY:
+		spin_lock_irqsave(&priv->lock, flags);
+		if (priv->timer.function == NULL) {
+			elapsed = jiffies - priv->timer_set_at;
+			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+				tlan_dio_write8(dev->base_addr,
+						TLAN_LED_REG, TLAN_LED_LINK);
+			} else  {
+				priv->timer.expires = priv->timer_set_at
+					+ TLAN_TIMER_ACT_DELAY;
+				spin_unlock_irqrestore(&priv->lock, flags);
+				add_timer(&priv->timer);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&priv->lock, flags);
+		break;
+	default:
+		break;
+	}
+
+}
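+
+/* The one-shot events above are armed through tlan_set_timer(), defined
+ * earlier in this file.  A minimal sketch of the pattern it presumably
+ * wraps, mirroring the inline arming done in tlan_handle_rx_eof():
+ *
+ *	priv->timer_set_at = jiffies;
+ *	priv->timer_type = TLAN_TIMER_PHY_PUP;
+ *	priv->timer.function = tlan_timer;
+ *	priv->timer.expires = jiffies + msecs_to_jiffies(50);
+ *	add_timer(&priv->timer);
+ *
+ * Only one event is pending at a time, which is why tlan_timer() clears
+ * priv->timer.function on entry and the activity path tests it for NULL.
+ */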
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver adapter related routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ *	tlan_reset_lists
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	The device structure with the list
+ *			structures to be reset.
+ *
+ *	This routine sets the variables associated with managing
+ *	the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	int		i;
+	struct tlan_list	*list;
+	dma_addr_t	list_phys;
+	struct sk_buff	*skb;
+
+	priv->tx_head = 0;
+	priv->tx_tail = 0;
+	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+		list = priv->tx_list + i;
+		list->c_stat = TLAN_CSTAT_UNUSED;
+		list->buffer[0].address = 0;
+		list->buffer[2].count = 0;
+		list->buffer[2].address = 0;
+		list->buffer[8].address = 0;
+		list->buffer[9].address = 0;
+	}
+
+	priv->rx_head = 0;
+	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+		list = priv->rx_list + i;
+		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+		list->c_stat = TLAN_CSTAT_READY;
+		list->frame_size = TLAN_MAX_FRAME_SIZE;
+		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
+		if (!skb)
+			break;
+
+		list->buffer[0].address = pci_map_single(priv->pci_dev,
+							 skb->data,
+							 TLAN_MAX_FRAME_SIZE,
+							 PCI_DMA_FROMDEVICE);
+		tlan_store_skb(list, skb);
+		list->buffer[1].count = 0;
+		list->buffer[1].address = 0;
+		list->forward = list_phys + sizeof(struct tlan_list);
+	}
+
+	/* if we ran out of memory early, clear the remaining skb slots */
+	while (i < TLAN_NUM_RX_LISTS) {
+		tlan_store_skb(priv->rx_list + i, NULL);
+		++i;
+	}
+	list->forward = 0;
+
+}
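+
+/* Both rings live in one contiguous DMA block, so the physical address
+ * of element i is pure arithmetic, exactly as computed above:
+ *
+ *	dma_addr_t phys = priv->rx_list_dma + sizeof(struct tlan_list) * i;
+ *
+ * Each element's forward member holds the physical address of element
+ * i + 1, and 0 terminates the chain for the adapter.
+ */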
+
+
+static void tlan_free_lists(struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	int		i;
+	struct tlan_list	*list;
+	struct sk_buff	*skb;
+
+	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+		list = priv->tx_list + i;
+		skb = tlan_get_skb(list);
+		if (skb) {
+			pci_unmap_single(
+				priv->pci_dev,
+				list->buffer[0].address,
+				max(skb->len,
+				    (unsigned int)TLAN_MIN_FRAME_SIZE),
+				PCI_DMA_TODEVICE);
+			dev_kfree_skb_any(skb);
+			list->buffer[8].address = 0;
+			list->buffer[9].address = 0;
+		}
+	}
+
+	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+		list = priv->rx_list + i;
+		skb = tlan_get_skb(list);
+		if (skb) {
+			pci_unmap_single(priv->pci_dev,
+					 list->buffer[0].address,
+					 TLAN_MAX_FRAME_SIZE,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb_any(skb);
+			list->buffer[8].address = 0;
+			list->buffer[9].address = 0;
+		}
+	}
+}
+
+
+
+
+/***************************************************************
+ *	tlan_print_dio
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		io_base		Base IO port of the device whose
+ *				DIO registers are to be printed.
+ *
+ *	This function prints out all the internal (DIO)
+ *	registers of a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_print_dio(u16 io_base)
+{
+	u32 data0, data1;
+	int	i;
+
+	pr_info("Contents of internal registers for io base 0x%04hx\n",
+		io_base);
+	pr_info("Off.  +0        +4\n");
+	for (i = 0; i < 0x4C; i += 8) {
+		data0 = tlan_dio_read32(io_base, i);
+		data1 = tlan_dio_read32(io_base, i + 0x4);
+		pr_info("0x%02x  0x%08x 0x%08x\n", i, data0, data1);
+	}
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_print_list
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		list	A pointer to the struct tlan_list structure to
+ *			be printed.
+ *		type	A string to designate type of list,
+ *			"Rx" or "Tx".
+ *		num	The index of the list.
+ *
+ *	This function prints out the contents of the list
+ *	pointed to by the list parameter.
+ *
+ **************************************************************/
+
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
+{
+	int i;
+
+	pr_info("%s List %d at %p\n", type, num, list);
+	pr_info("   Forward    = 0x%08x\n",  list->forward);
+	pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
+	pr_info("   Frame Size = 0x%04hx\n", list->frame_size);
+	/* for (i = 0; i < 10; i++) { */
+	for (i = 0; i < 2; i++) {
+		pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+			i, list->buffer[i].count, list->buffer[i].address);
+	}
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_read_and_clear_stats
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	Pointer to device structure of the adapter
+ *			from which to read stats.
+ *		record	Flag indicating whether to add the values
+ *			to the device statistics.
+ *
+ *	This function reads all the internal statistics registers
+ *	of the TLAN chip, which clears them as a side effect.
+ *	It then either adds the values to the device's stats
+ *	struct, or discards them, depending on whether record
+ *	is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
+{
+	u32		tx_good, tx_under;
+	u32		rx_good, rx_over;
+	u32		def_tx, crc, code;
+	u32		multi_col, single_col;
+	u32		excess_col, late_col, loss;
+
+	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	tx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	rx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+	rx_over  = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+	def_tx  = inb(dev->base_addr + TLAN_DIO_DATA);
+	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	crc     = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+	code    = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	multi_col   = inb(dev->base_addr + TLAN_DIO_DATA);
+	multi_col  += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	single_col  = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+	late_col   = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+	loss       = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+	if (record) {
+		dev->stats.rx_packets += rx_good;
+		dev->stats.rx_errors  += rx_over + crc + code;
+		dev->stats.tx_packets += tx_good;
+		dev->stats.tx_errors  += tx_under + loss;
+		dev->stats.collisions += multi_col
+			+ single_col + excess_col + late_col;
+
+		dev->stats.rx_over_errors    += rx_over;
+		dev->stats.rx_crc_errors     += crc;
+		dev->stats.rx_frame_errors   += code;
+
+		dev->stats.tx_aborted_errors += tx_under;
+		dev->stats.tx_carrier_errors += loss;
+	}
+
+}
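+
+/* Each statistics register packs several counters into one 32-bit DIO
+ * word, read above a byte at a time, least significant byte first.  For
+ * TLAN_GOOD_TX_FRMS, for example:
+ *
+ *	bytes 0-2:  good tx frames (24-bit counter)
+ *	byte  3:    tx underruns   (8-bit counter)
+ *
+ * hence tx_good is assembled from three shifted inb() reads and
+ * tx_under comes from the fourth byte.
+ */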
+
+
+
+
+/***************************************************************
+ *	tlan_reset_adapter
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	Pointer to device structure of adapter
+ *			to be reset.
+ *
+ *	This function resets the adapter and its physical
+ *	device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
+ *	Programmer's Guide" for details.  The routine tries to
+ *	implement what is detailed there, though adjustments
+ *	have been made.
+ *
+ **************************************************************/
+
+static void
+tlan_reset_adapter(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	int		i;
+	u32		addr;
+	u32		data;
+	u8		data8;
+
+	priv->tlan_full_duplex = false;
+	priv->phy_online = 0;
+	netif_carrier_off(dev);
+
+/*  1.	Assert reset bit. */
+
+	data = inl(dev->base_addr + TLAN_HOST_CMD);
+	data |= TLAN_HC_AD_RST;
+	outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+	udelay(1000);
+
+/*  2.	Turn off interrupts. (Probably isn't necessary) */
+
+	data = inl(dev->base_addr + TLAN_HOST_CMD);
+	data |= TLAN_HC_INT_OFF;
+	outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/*  3.	Clear AREGs and HASHs. */
+
+	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+		tlan_dio_write32(dev->base_addr, (u16) i, 0);
+
+/*  4.	Setup NetConfig register. */
+
+	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+/*  5.	Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
+
+/*  6.	Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
+
+/*  7.	Setup the remaining registers. */
+
+	if (priv->tlan_rev >= 0x30) {
+		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
+	}
+	tlan_phy_detect(dev);
+	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+
+	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
+		data |= TLAN_NET_CFG_BIT;
+		if (priv->aui == 1) {
+			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
+			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+			priv->tlan_full_duplex = true;
+		} else {
+			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
+		}
+	}
+
+	/* don't power down internal PHY if we're going to use it */
+	if (priv->phy_num == 0 ||
+	   (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
+		data |= TLAN_NET_CFG_PHY_EN;
+	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+		tlan_finish_reset(dev);
+	else
+		tlan_phy_power_down(dev);
+
+}
+
+
+
+
+static void
+tlan_finish_reset(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u8		data;
+	u32		phy;
+	u8		sio;
+	u16		status;
+	u16		partner;
+	u16		tlphy_ctl;
+	u16		tlphy_par;
+	u16		tlphy_id1, tlphy_id2;
+	int		i;
+
+	phy = priv->phy[priv->phy_num];
+
+	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+	if (priv->tlan_full_duplex)
+		data |= TLAN_NET_CMD_DUPLEX;
+	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
+	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+	if (priv->phy_num == 0)
+		data |= TLAN_NET_MASK_MASK7;
+	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
+
+	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+	    (priv->aui)) {
+		status = MII_GS_LINK;
+		netdev_info(dev, "Link forced\n");
+	} else {
+		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+		udelay(1000);
+		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+		if (status & MII_GS_LINK) {
+			/* We only support link info on Nat.Sem. PHYs */
+			if ((tlphy_id1 == NAT_SEM_ID1) &&
+			    (tlphy_id2 == NAT_SEM_ID2)) {
+				tlan_mii_read_reg(dev, phy, MII_AN_LPA,
+					&partner);
+				tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
+					&tlphy_par);
+
+				netdev_info(dev,
+					"Link active, %s %uMbps %s-Duplex\n",
+					!(tlphy_par & TLAN_PHY_AN_EN_STAT)
+					? "forced" : "Autonegotiation enabled,",
+					tlphy_par & TLAN_PHY_SPEED_100
+					? 100 : 10,
+					tlphy_par & TLAN_PHY_DUPLEX_FULL
+					? "Full" : "Half");
+
+				if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+					netdev_info(dev, "Partner capability:");
+					for (i = 5; i < 10; i++)
+						if (partner & (1 << i))
+							pr_cont(" %s",
+								media[i-5]);
+					pr_cont("\n");
+				}
+			} else
+				netdev_info(dev, "Link active\n");
+			/* Enabling link beat monitoring */
+			priv->media_timer.expires = jiffies + HZ;
+			add_timer(&priv->media_timer);
+		}
+	}
+
+	if (priv->phy_num == 0) {
+		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+		tlphy_ctl |= TLAN_TC_INTEN;
+		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+		sio |= TLAN_NET_SIO_MINTEN;
+		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+	}
+
+	if (status & MII_GS_LINK) {
+		tlan_set_mac(dev, 0, dev->dev_addr);
+		priv->phy_online = 1;
+		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+			outb((TLAN_HC_REQ_INT >> 8),
+			     dev->base_addr + TLAN_HOST_CMD + 1);
+		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+		netif_carrier_on(dev);
+	} else {
+		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
+		return;
+	}
+	tlan_set_multicast_list(dev);
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_set_mac
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	Pointer to device structure of adapter
+ *			on which to change the AREG.
+ *		areg	The AREG to set the address in (0 - 3).
+ *		mac	A pointer to an array of chars.  Each
+ *			element stores one byte of the address.
+ *			i.e., it isn't in ASCII.
+ *
+ *	This function transfers a MAC address to one of the
+ *	TLAN AREGs (address registers).  The TLAN chip locks
+ *	the register on writing to offset 0 and unlocks the
+ *	register after writing to offset 5.  If NULL is passed
+ *	in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
+{
+	int i;
+
+	areg *= 6;
+
+	if (mac != NULL) {
+		for (i = 0; i < 6; i++)
+			tlan_dio_write8(dev->base_addr,
+					TLAN_AREG_0 + areg + i, mac[i]);
+	} else {
+		for (i = 0; i < 6; i++)
+			tlan_dio_write8(dev->base_addr,
+					TLAN_AREG_0 + areg + i, 0);
+	}
+
+}
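+
+/* Typical use, as in tlan_finish_reset() above, is to load AREG 0 with
+ * the interface address once link is up:
+ *
+ *	tlan_set_mac(dev, 0, dev->dev_addr);
+ *
+ * Passing NULL instead of an address clears the AREG to all zeroes.
+ */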
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver PHY layer routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+/*********************************************************************
+ *	tlan_phy_print
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	A pointer to the device structure of the
+ *			TLAN device having the PHYs to be detailed.
+ *
+ *	This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
+
+static void tlan_phy_print(struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	u16 i, data0, data1, data2, data3, phy;
+
+	phy = priv->phy[priv->phy_num];
+
+	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+		netdev_info(dev, "Unmanaged PHY\n");
+	} else if (phy <= TLAN_PHY_MAX_ADDR) {
+		netdev_info(dev, "PHY 0x%02x\n", phy);
+		pr_info("   Off.  +0     +1     +2     +3\n");
+		for (i = 0; i < 0x20; i += 4) {
+			tlan_mii_read_reg(dev, phy, i, &data0);
+			tlan_mii_read_reg(dev, phy, i + 1, &data1);
+			tlan_mii_read_reg(dev, phy, i + 2, &data2);
+			tlan_mii_read_reg(dev, phy, i + 3, &data3);
+			pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+				i, data0, data1, data2, data3);
+		}
+	} else {
+		netdev_info(dev, "Invalid PHY\n");
+	}
+
+}
+
+
+
+
+/*********************************************************************
+ *	tlan_phy_detect
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	A pointer to the device structure of the adapter
+ *			whose PHY needs to be determined.
+ *
+ *	So far I've found that adapters which have external PHYs
+ *	may also use the internal PHY for part of the functionality
+ *	(e.g., AUI/Thinnet).  This function finds out if this TLAN
+ *	chip has an internal PHY, and then finds the first external
+ *	PHY (starting from address 0), if one exists.
+ *
+ ********************************************************************/
+
+static void tlan_phy_detect(struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+	u16		control;
+	u16		hi;
+	u16		lo;
+	u32		phy;
+
+	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+		priv->phy_num = 0xffff;
+		return;
+	}
+
+	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
+
+	if (hi != 0xffff)
+		priv->phy[0] = TLAN_PHY_MAX_ADDR;
+	else
+		priv->phy[0] = TLAN_PHY_NONE;
+
+	priv->phy[1] = TLAN_PHY_NONE;
+	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+		if ((control != 0xffff) ||
+		    (hi != 0xffff) || (lo != 0xffff)) {
+			TLAN_DBG(TLAN_DEBUG_GNRL,
+				 "PHY found at %02x %04x %04x %04x\n",
+				 phy, control, hi, lo);
+			if ((priv->phy[1] == TLAN_PHY_NONE) &&
+			    (phy != TLAN_PHY_MAX_ADDR)) {
+				priv->phy[1] = phy;
+			}
+		}
+	}
+
+	if (priv->phy[1] != TLAN_PHY_NONE)
+		priv->phy_num = 1;
+	else if (priv->phy[0] != TLAN_PHY_NONE)
+		priv->phy_num = 0;
+	else
+		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
+
+}
+
+
+
+
+static void tlan_phy_power_down(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u16		value;
+
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
+	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+	tlan_mii_sync(dev->base_addr);
+	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+	if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
+		/* if using internal PHY, the external PHY must be powered on */
+		if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
+			value = MII_GC_ISOLATE; /* just isolate it from MII */
+		tlan_mii_sync(dev->base_addr);
+		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
+	}
+
+	/* Wait for 50 ms and power up.
+	 * This delay is arbitrary; it is intended to make sure the
+	 * transceiver settles.
+	 */
+	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
+
+}
+
+
+
+
+static void tlan_phy_power_up(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u16		value;
+
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+	tlan_mii_sync(dev->base_addr);
+	value = MII_GC_LOOPBK;
+	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+	tlan_mii_sync(dev->base_addr);
+	/* Wait for 500 ms and reset the
+	 * transceiver.  The TLAN docs say both 50 ms and
+	 * 500 ms, so do the longer, just in case.
+	 */
+	tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
+
+}
+
+
+
+
+static void tlan_phy_reset(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u16		phy;
+	u16		value;
+	unsigned long timeout = jiffies + HZ;
+
+	phy = priv->phy[priv->phy_num];
+
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+	tlan_mii_sync(dev->base_addr);
+	value = MII_GC_LOOPBK | MII_GC_RESET;
+	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+	do {
+		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+		if (time_after(jiffies, timeout)) {
+			netdev_err(dev, "PHY reset timeout\n");
+			return;
+		}
+	} while (value & MII_GC_RESET);
+
+	/* Wait for 50 ms and initialize.
+	 * The original code waited 500 ms, for reasons long forgotten;
+	 * 50 ms seems long enough.
+	 */
+	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
+
+}
+
+
+
+
+static void tlan_phy_start_link(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u16		ability;
+	u16		control;
+	u16		data;
+	u16		phy;
+	u16		status;
+	u16		tctl;
+
+	phy = priv->phy[priv->phy_num];
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
+
+	if ((status & MII_GS_AUTONEG) &&
+	    (!priv->aui)) {
+		ability = status >> 11;
+		if (priv->speed  == TLAN_SPEED_10 &&
+		    priv->duplex == TLAN_DUPLEX_HALF) {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+		} else if (priv->speed == TLAN_SPEED_10 &&
+			   priv->duplex == TLAN_DUPLEX_FULL) {
+			priv->tlan_full_duplex = true;
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+		} else if (priv->speed == TLAN_SPEED_100 &&
+			   priv->duplex == TLAN_DUPLEX_HALF) {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+		} else if (priv->speed == TLAN_SPEED_100 &&
+			   priv->duplex == TLAN_DUPLEX_FULL) {
+			priv->tlan_full_duplex = true;
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
+		} else {
+
+			/* Set Auto-Neg advertisement */
+			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+					   (ability << 5) | 1);
+			/* Enable Auto-Neg */
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
+			/* Restart Auto-Neg */
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
+			/* Wait 2 sec for autonegotiation
+			 * to complete.  The max spec time is less than this
+			 * but the card needs additional time to start AN.
+			 * .5 sec should be plenty extra.
+			 */
+			netdev_info(dev, "Starting autonegotiation\n");
+			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
+			return;
+		}
+
+	}
+
+	if ((priv->aui) && (priv->phy_num != 0)) {
+		priv->phy_num = 0;
+		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+			| TLAN_NET_CFG_PHY_EN;
+		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+		tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
+		return;
+	} else if (priv->phy_num == 0) {
+		control = 0;
+		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+		if (priv->aui) {
+			tctl |= TLAN_TC_AUISEL;
+		} else {
+			tctl &= ~TLAN_TC_AUISEL;
+			if (priv->duplex == TLAN_DUPLEX_FULL) {
+				control |= MII_GC_DUPLEX;
+				priv->tlan_full_duplex = true;
+			}
+			if (priv->speed == TLAN_SPEED_100)
+				control |= MII_GC_SPEEDSEL;
+		}
+		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
+	}
+
+	/* Wait for 4 sec to give the transceiver time
+	 * to establish link.
+	 */
+	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
+
+}
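+
+/* The bare MII_GEN_CTL values written above are combinations of the
+ * bits defined in tlan.h:
+ *
+ *	0x0000 = 10 Mbps half duplex   (no bits set)
+ *	0x0100 = 10 Mbps full duplex   (MII_GC_DUPLEX)
+ *	0x2000 = 100 Mbps half duplex  (MII_GC_SPEEDSEL)
+ *	0x2100 = 100 Mbps full duplex  (MII_GC_SPEEDSEL | MII_GC_DUPLEX)
+ *	0x1000 = autoneg enable        (MII_GC_AUTOENB)
+ *	0x1200 = enable + restart      (MII_GC_AUTOENB | MII_GC_AUTORSRT)
+ */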
+
+
+
+
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
+{
+	struct tlan_priv	*priv = netdev_priv(dev);
+	u16		an_adv;
+	u16		an_lpa;
+	u16		mode;
+	u16		phy;
+	u16		status;
+
+	phy = priv->phy[priv->phy_num];
+
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+	udelay(1000);
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+
+	if (!(status & MII_GS_AUTOCMPLT)) {
+		/* Wait another 2 sec to give the process
+		 * more time.  Perhaps we should fail after a while.
+		 */
+		tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
+		return;
+	}
+
+	netdev_info(dev, "Autonegotiation complete\n");
+	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
+	mode = an_adv & an_lpa & 0x03E0;
+	if (mode & 0x0100)
+		priv->tlan_full_duplex = true;
+	else if (!(mode & 0x0080) && (mode & 0x0040))
+		priv->tlan_full_duplex = true;
+
+	/* switch to internal PHY for 10 Mbps */
+	if ((!(mode & 0x0180)) &&
+	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+	    (priv->phy_num != 0)) {
+		priv->phy_num = 0;
+		tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
+		return;
+	}
+
+	if (priv->phy_num == 0) {
+		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+		    (an_adv & an_lpa & 0x0040)) {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+					   MII_GC_AUTOENB | MII_GC_DUPLEX);
+			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
+		} else {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+					   MII_GC_AUTOENB);
+			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
+		}
+	}
+
+	/* Wait for 100 ms.  No reason in particular.
+	 */
+	tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
+
+}
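+
+/* an_adv & an_lpa leaves only the technology bits both link partners
+ * advertised.  In the standard ANAR/ANLPA layout masked by 0x03E0:
+ *
+ *	0x0020 = 10BASE-T half     0x0040 = 10BASE-T full
+ *	0x0080 = 100BASE-TX half   0x0100 = 100BASE-TX full
+ *	0x0200 = 100BASE-T4
+ *
+ * so (mode & 0x0100), or 0x0040 without 0x0080, selects full duplex,
+ * and !(mode & 0x0180) means no 100BASE-TX mode was negotiated.
+ */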
+
+
+/*********************************************************************
+ *
+ *     tlan_phy_monitor
+ *
+ *     Returns:
+ *	      None
+ *
+ *     Params:
+ *	      t		     Pointer to the struct timer_list
+ *			     that fired; the device is recovered
+ *			     from it via from_timer().
+ *
+ *
+ *     This function monitors PHY condition by reading the status
+ *     register via the MII bus, controls LINK LED and notifies the
+ *     kernel about link state.
+ *
+ *******************************************************************/
+
+static void tlan_phy_monitor(struct timer_list *t)
+{
+	struct tlan_priv *priv = from_timer(priv, t, media_timer);
+	struct net_device *dev = priv->dev;
+	u16     phy;
+	u16     phy_status;
+
+	phy = priv->phy[priv->phy_num];
+
+	/* Get PHY status register */
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
+
+	/* Check if link has been lost */
+	if (!(phy_status & MII_GS_LINK)) {
+		if (netif_carrier_ok(dev)) {
+			printk(KERN_DEBUG "TLAN: %s has lost link\n",
+			       dev->name);
+			tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
+			netif_carrier_off(dev);
+			if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
+				/* power down internal PHY */
+				u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
+					   MII_GC_ISOLATE;
+
+				tlan_mii_sync(dev->base_addr);
+				tlan_mii_write_reg(dev, priv->phy[0],
+						   MII_GEN_CTL, data);
+				/* set to external PHY */
+				priv->phy_num = 1;
+				/* restart autonegotiation */
+				tlan_set_timer(dev, msecs_to_jiffies(400),
+					       TLAN_TIMER_PHY_PDOWN);
+				return;
+			}
+		}
+	}
+
+	/* Link reestablished? */
+	if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
+		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+		       dev->name);
+		netif_carrier_on(dev);
+	}
+	priv->media_timer.expires = jiffies + HZ;
+	add_timer(&priv->media_timer);
+}
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver MII routines
+
+these routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ *	tlan_mii_read_reg
+ *
+ *	Returns:
+ *		false	if ack received ok
+ *		true	if no ack received or other error
+ *
+ *	Parms:
+ *		dev		The device structure containing
+ *				the IO address and interrupt count
+ *				for this device.
+ *		phy		The address of the PHY to be queried.
+ *		reg		The register whose contents are to be
+ *				retrieved.
+ *		val		A pointer to a variable to store the
+ *				retrieved value.
+ *
+ *	This function uses the TLAN's MII bus to retrieve the contents
+ *	of a given register on a PHY.  It sends the appropriate info
+ *	and then reads the 16-bit register value from the MII bus via
+ *	the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
+{
+	u8	nack;
+	u16	sio, tmp;
+	u32	i;
+	bool	err;
+	int	minten;
+	struct tlan_priv *priv = netdev_priv(dev);
+	unsigned long flags = 0;
+
+	err = false;
+	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+	if (!in_irq())
+		spin_lock_irqsave(&priv->lock, flags);
+
+	tlan_mii_sync(dev->base_addr);
+
+	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+	if (minten)
+		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
+	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read  (10b) */
+	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #      */
+	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #    */
+
+
+	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* change direction */
+
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* clock idle bit */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* wait 300ns */
+
+	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* check for ACK */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK */
+	if (nack) {					/* no ACK, so fake it */
+		for (i = 0; i < 16; i++) {
+			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+		}
+		tmp = 0xffff;
+		err = true;
+	} else {					/* ACK, so read data */
+		for (tmp = 0, i = 0x8000; i; i >>= 1) {
+			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
+				tmp |= i;
+			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+		}
+	}
+
+
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+	if (minten)
+		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+	*val = tmp;
+
+	if (!in_irq())
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+	return err;
+
+}
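+
+/* The bit stream above is a standard IEEE 802.3 clause 22 management
+ * frame.  For a read it looks like:
+ *
+ *	<preamble> 01 10 <5-bit phy addr> <5-bit reg addr> <TA> <16 data bits>
+ *
+ * tlan_mii_sync() supplies the preamble clocks, and the turnaround (TA)
+ * is where MTXEN is cleared so the PHY can drive MDATA; a high bit there
+ * means no PHY answered, hence the faked 0xffff result.
+ */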
+
+
+
+
+/***************************************************************
+ *	tlan_mii_send_data
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		base_port	The base IO port of the adapter in
+ *				question.
+ *		data		The value to be placed on the MII bus.
+ *		num_bits	The number of bits in data that are to
+ *				be placed on the MII bus.
+ *
+ *	This function sends one sequence of bits on the MII
+ *	configuration bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
+{
+	u16 sio;
+	u32 i;
+
+	if (num_bits == 0)
+		return;
+
+	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
+
+	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+		if (data & i)
+			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
+		else
+			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+	}
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_mii_sync
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		base_port	The base IO port of the adapter in
+ *				question.
+ *
+ *	This function syncs all PHYs in terms of the MII configuration
+ *	bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_sync(u16 base_port)
+{
+	int i;
+	u16 sio;
+
+	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+	for (i = 0; i < 32; i++) {
+		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+	}
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_mii_write_reg
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev		The device structure for the device
+ *				to write to.
+ *		phy		The address of the PHY to be written to.
+ *		reg		The register whose contents are to be
+ *				written.
+ *		val		The value to be written to the register.
+ *
+ *	This function uses the TLAN's MII bus to write the contents of a
+ *	given register on a PHY.  It sends the appropriate info and then
+ *	writes the 16-bit register value from the MII configuration bus
+ *	via the TLAN SIO register.
+ *
+ **************************************************************/
+
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+	u16	sio;
+	int	minten;
+	unsigned long flags = 0;
+	struct tlan_priv *priv = netdev_priv(dev);
+
+	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+	if (!in_irq())
+		spin_lock_irqsave(&priv->lock, flags);
+
+	tlan_mii_sync(dev->base_addr);
+
+	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+	if (minten)
+		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
+	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
+	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #      */
+	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #    */
+
+	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* turnaround (10b) */
+	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */
+
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);	/* idle cycle */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+	if (minten)
+		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+	if (!in_irq())
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver eeprom routines
+
+The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM.  These functions are based on information in Microchip's
+data sheet.  I don't know how well these functions will work with
+other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ *	tlan_ee_send_start
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *
+ *	This function sends a start cycle to an EEPROM attached
+ *	to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
+{
+	u16	sio;
+
+	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_ee_send_byte
+ *
+ *	Returns:
+ *		If the correct ack was received, 0, otherwise 1
+ *	Parms:	io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *		data		The 8 bits of information to
+ *				send to the EEPROM.
+ *		stop		If TLAN_EEPROM_STOP is passed, a
+ *				stop cycle is sent after the byte
+ *				is sent and the ack is read.
+ *
+ *	This function sends a byte on the serial EEPROM line,
+ *	driving the clock to send each bit. The function then
+ *	reverses transmission direction and reads an acknowledge
+ *	bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
+{
+	int	err;
+	u8	place;
+	u16	sio;
+
+	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+	/* Assume clock is low, tx is enabled; */
+	for (place = 0x80; place != 0; place >>= 1) {
+		if (place & data)
+			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+		else
+			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+	}
+	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+
+	if ((!err) && stop) {
+		/* STOP, raise data while clock is high */
+		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+	}
+
+	return err;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_ee_receive_byte
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *		data		An address to a char to hold the
+ *				data sent from the EEPROM.
+ *		stop		If TLAN_EEPROM_STOP is passed, a
+ *				stop cycle is sent after the
+ *				byte is received, and no ack is
+ *				sent.
+ *
+ *	This function receives 8 bits of data from the EEPROM
+ *	over the serial link.  It then sends an ack bit, or no
+ *	ack and a stop bit.  This function is used to retrieve
+ *	data after the address of a byte in the EEPROM has been
+ *	sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
+{
+	u8  place;
+	u16 sio;
+
+	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+	*data = 0;
+
+	/* Assume clock is low, tx is enabled; */
+	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+	for (place = 0x80; place; place >>= 1) {
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
+			*data |= place;
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+	}
+
+	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+	if (!stop) {
+		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+	} else {
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);	/* no ack = 1 (?) */
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+		/* STOP, raise data while clock is high */
+		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+	}
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_ee_read_byte
+ *
+ *	Returns:
+ *		No error = 0, else, the stage at which the error
+ *		occurred.
+ *	Parms:
+ *		dev		The device structure of the TLAN
+ *				device with the EEPROM to use.
+ *		ee_addr		The address of the byte in the
+ *				EEPROM whose contents are to be
+ *				retrieved.
+ *		data		An address to a char to hold the
+ *				data obtained from the EEPROM.
+ *
+ *	This function reads a byte of information from a byte
+ *	cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
+{
+	int err;
+	struct tlan_priv *priv = netdev_priv(dev);
+	unsigned long flags = 0;
+	int ret = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	tlan_ee_send_start(dev->base_addr);
+	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+	if (err) {
+		ret = 1;
+		goto fail;
+	}
+	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+	if (err) {
+		ret = 2;
+		goto fail;
+	}
+	tlan_ee_send_start(dev->base_addr);
+	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+	if (err) {
+		ret = 3;
+		goto fail;
+	}
+	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
+fail:
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return ret;
+
+}
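+
+/* The sequence above is the standard I2C "random read" for a 24C02-style
+ * EEPROM:
+ *
+ *	START, 0xa0 (device select, write), word address,
+ *	START, 0xa1 (device select, read),  data byte, NAK, STOP
+ *
+ * The returned stage number (1-3) identifies which of the three
+ * addressing bytes went unacknowledged.
+ */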
+
+
+
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h
new file mode 100644
index 0000000..e992841
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.h
@@ -0,0 +1,544 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ *  Linux ThunderLAN Driver
+ *
+ *  tlan.h
+ *  by James Banks
+ *
+ *  (C) 1997-1998 Caldera, Inc.
+ *  (C) 1999-2001 Torben Mathiasen
+ *
+ *  This software may be used and distributed according to the terms
+ *  of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ *  Dec 10, 1999	Torben Mathiasen <torben.mathiasen@compaq.com>
+ *			New Maintainer
+ *
+ ********************************************************************/
+
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+
+
+	/*****************************************************************
+	 * TLan Definitions
+	 *
+	 ****************************************************************/
+
+#define TLAN_MIN_FRAME_SIZE	64
+#define TLAN_MAX_FRAME_SIZE	1600
+
+#define TLAN_NUM_RX_LISTS	32
+#define TLAN_NUM_TX_LISTS	64
+
+#define TLAN_IGNORE		0
+#define TLAN_RECORD		1
+
+#define TLAN_DBG(lvl, format, args...)					\
+	do {								\
+		if (debug&lvl)						\
+			printk(KERN_DEBUG "TLAN: " format, ##args);	\
+	} while (0)
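+
+/* Example use, as in tlan.c:
+ *
+ *	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  handling RX EOF (Head=%d Tail=%d)\n",
+ *		 priv->rx_head, priv->rx_tail);
+ *
+ * "debug" is the driver's debug bitmask; each TLAN_DEBUG_* bit below
+ * gates one class of messages.
+ */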
+
+#define TLAN_DEBUG_GNRL		0x0001
+#define TLAN_DEBUG_TX		0x0002
+#define TLAN_DEBUG_RX		0x0004
+#define TLAN_DEBUG_LIST		0x0008
+#define TLAN_DEBUG_PROBE	0x0010
+
+#define TX_TIMEOUT		(10*HZ)	 /* We need time for auto-neg */
+#define MAX_TLAN_BOARDS		8	 /* Max number of boards installed
+					    at a time */
+
+
+	/*****************************************************************
+	 * Device Identification Definitions
+	 *
+	 ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2			0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100	0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183			0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325			0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326			0x0014
+#endif
+
+struct tlan_adapter_entry {
+	u16	vendor_id;
+	u16	device_id;
+	char	*device_label;
+	u32	flags;
+	u16	addr_ofs;
+};
+
+#define TLAN_ADAPTER_NONE		0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY	0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY	0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10	0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED	0x00000008
+
+#define TLAN_SPEED_DEFAULT	0
+#define TLAN_SPEED_10		10
+#define TLAN_SPEED_100		100
+
+#define TLAN_DUPLEX_DEFAULT	0
+#define TLAN_DUPLEX_HALF	1
+#define TLAN_DUPLEX_FULL	2
+
+
+
+	/*****************************************************************
+	 * EISA Definitions
+	 *
+	 ****************************************************************/
+
+#define EISA_ID      0xc80   /* EISA ID Registers */
+#define EISA_ID0     0xc80   /* EISA ID Register 0 */
+#define EISA_ID1     0xc81   /* EISA ID Register 1 */
+#define EISA_ID2     0xc82   /* EISA ID Register 2 */
+#define EISA_ID3     0xc83   /* EISA ID Register 3 */
+#define EISA_CR      0xc84   /* EISA Control Register */
+#define EISA_REG0    0xc88   /* EISA Configuration Register 0 */
+#define EISA_REG1    0xc89   /* EISA Configuration Register 1 */
+#define EISA_REG2    0xc8a   /* EISA Configuration Register 2 */
+#define EISA_REG3    0xc8f   /* EISA Configuration Register 3 */
+#define EISA_APROM   0xc90   /* Ethernet Address PROM */
+
+
+
+	/*****************************************************************
+	 * Rx/Tx List Definitions
+	 *
+	 ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST	10
+#define TLAN_LAST_BUFFER	0x80000000
+#define TLAN_CSTAT_UNUSED	0x8000
+#define TLAN_CSTAT_FRM_CMP	0x4000
+#define TLAN_CSTAT_READY	0x3000
+#define TLAN_CSTAT_EOC		0x0800
+#define TLAN_CSTAT_RX_ERROR	0x0400
+#define TLAN_CSTAT_PASS_CRC	0x0200
+#define TLAN_CSTAT_DP_PR	0x0100
+
+
+struct tlan_buffer {
+	u32	count;
+	u32	address;
+};
+
+
+struct tlan_list {
+	u32		forward;
+	u16		c_stat;
+	u16		frame_size;
+	struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
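+
+/* buffer.count carries the fragment byte count in its low bits; OR-ing
+ * in TLAN_LAST_BUFFER marks the final fragment of a list.  A one-
+ * fragment rx list is therefore set up as in tlan_reset_lists():
+ *
+ *	list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ */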
+
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+	/*****************************************************************
+	 * PHY definitions
+	 *
+	 ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR	0x1F
+#define TLAN_PHY_NONE		0x20
+
+
+
+
+	/*****************************************************************
+	 * TLAN Private Information Structure
+	 *
+	 ****************************************************************/
+
+struct tlan_priv {
+	struct net_device       *next_device;
+	struct pci_dev		*pci_dev;
+	struct net_device       *dev;
+	void			*dma_storage;
+	dma_addr_t		dma_storage_dma;
+	unsigned int		dma_size;
+	u8			*pad_buffer;
+	struct tlan_list	*rx_list;
+	dma_addr_t		rx_list_dma;
+	u8			*rx_buffer;
+	dma_addr_t		rx_buffer_dma;
+	u32			rx_head;
+	u32			rx_tail;
+	u32			rx_eoc_count;
+	struct tlan_list	*tx_list;
+	dma_addr_t		tx_list_dma;
+	u8			*tx_buffer;
+	dma_addr_t		tx_buffer_dma;
+	u32			tx_head;
+	u32			tx_in_progress;
+	u32			tx_tail;
+	u32			tx_busy_count;
+	u32			phy_online;
+	u32			timer_set_at;
+	u32			timer_type;
+	struct timer_list	timer;
+	struct timer_list	media_timer;
+	struct board		*adapter;
+	u32			adapter_rev;
+	u32			aui;
+	u32			debug;
+	u32			duplex;
+	u32			phy[2];
+	u32			phy_num;
+	u32			speed;
+	u8			tlan_rev;
+	u8			tlan_full_duplex;
+	spinlock_t		lock;
+	struct work_struct			tlan_tqueue;
+};
+
+
+
+
+	/*****************************************************************
+	 * TLan Driver Timer Definitions
+	 *
+	 ****************************************************************/
+
+#define TLAN_TIMER_ACTIVITY		2
+#define TLAN_TIMER_PHY_PDOWN		3
+#define TLAN_TIMER_PHY_PUP		4
+#define TLAN_TIMER_PHY_RESET		5
+#define TLAN_TIMER_PHY_START_LINK	6
+#define TLAN_TIMER_PHY_FINISH_AN	7
+#define TLAN_TIMER_FINISH_RESET		8
+
+#define TLAN_TIMER_ACT_DELAY		(HZ/10)
+
+
+
+
+	/*****************************************************************
+	 * TLan Driver Eeprom Definitions
+	 *
+	 ****************************************************************/
+
+#define TLAN_EEPROM_ACK		0
+#define TLAN_EEPROM_STOP	1
+
+#define TLAN_EEPROM_SIZE	256
+
+
+
+	/*****************************************************************
+	 * Host Register Offsets and Contents
+	 *
+	 ****************************************************************/
+
+#define TLAN_HOST_CMD			0x00
+#define		TLAN_HC_GO		0x80000000
+#define		TLAN_HC_STOP		0x40000000
+#define		TLAN_HC_ACK		0x20000000
+#define		TLAN_HC_CS_MASK		0x1FE00000
+#define		TLAN_HC_EOC		0x00100000
+#define		TLAN_HC_RT		0x00080000
+#define		TLAN_HC_NES		0x00040000
+#define		TLAN_HC_AD_RST		0x00008000
+#define		TLAN_HC_LD_TMR		0x00004000
+#define		TLAN_HC_LD_THR		0x00002000
+#define		TLAN_HC_REQ_INT		0x00001000
+#define		TLAN_HC_INT_OFF		0x00000800
+#define		TLAN_HC_INT_ON		0x00000400
+#define		TLAN_HC_AC_MASK		0x000000FF
+#define TLAN_CH_PARM			0x04
+#define TLAN_DIO_ADR			0x08
+#define		TLAN_DA_ADR_INC		0x8000
+#define		TLAN_DA_RAM_ADR		0x4000
+#define TLAN_HOST_INT			0x0A
+#define		TLAN_HI_IV_MASK		0x1FE0
+#define		TLAN_HI_IT_MASK		0x001C
+#define TLAN_DIO_DATA			0x0C
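+
+/* HOST_CMD is written as one 32-bit word combining a command bit with
+ * its argument, e.g. (from tlan_finish_reset):
+ *
+ *	outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ *	outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+ *
+ * which starts the receive channel at the list just loaded through
+ * TLAN_CH_PARM.
+ */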
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD			0x00
+#define		TLAN_NET_CMD_NRESET	0x80
+#define		TLAN_NET_CMD_NWRAP	0x40
+#define		TLAN_NET_CMD_CSF	0x20
+#define		TLAN_NET_CMD_CAF	0x10
+#define		TLAN_NET_CMD_NOBRX	0x08
+#define		TLAN_NET_CMD_DUPLEX	0x04
+#define		TLAN_NET_CMD_TRFRAM	0x02
+#define		TLAN_NET_CMD_TXPACE	0x01
+#define TLAN_NET_SIO			0x01
+#define		TLAN_NET_SIO_MINTEN	0x80
+#define		TLAN_NET_SIO_ECLOK	0x40
+#define		TLAN_NET_SIO_ETXEN	0x20
+#define		TLAN_NET_SIO_EDATA	0x10
+#define		TLAN_NET_SIO_NMRST	0x08
+#define		TLAN_NET_SIO_MCLK	0x04
+#define		TLAN_NET_SIO_MTXEN	0x02
+#define		TLAN_NET_SIO_MDATA	0x01
+#define TLAN_NET_STS			0x02
+#define		TLAN_NET_STS_MIRQ	0x80
+#define		TLAN_NET_STS_HBEAT	0x40
+#define		TLAN_NET_STS_TXSTOP	0x20
+#define		TLAN_NET_STS_RXSTOP	0x10
+#define		TLAN_NET_STS_RSRVD	0x0F
+#define TLAN_NET_MASK			0x03
+#define		TLAN_NET_MASK_MASK7	0x80
+#define		TLAN_NET_MASK_MASK6	0x40
+#define		TLAN_NET_MASK_MASK5	0x20
+#define		TLAN_NET_MASK_MASK4	0x10
+#define		TLAN_NET_MASK_RSRVD	0x0F
+#define TLAN_NET_CONFIG			0x04
+#define		TLAN_NET_CFG_RCLK	0x8000
+#define		TLAN_NET_CFG_TCLK	0x4000
+#define		TLAN_NET_CFG_BIT	0x2000
+#define		TLAN_NET_CFG_RXCRC	0x1000
+#define		TLAN_NET_CFG_PEF	0x0800
+#define		TLAN_NET_CFG_1FRAG	0x0400
+#define		TLAN_NET_CFG_1CHAN	0x0200
+#define		TLAN_NET_CFG_MTEST	0x0100
+#define		TLAN_NET_CFG_PHY_EN	0x0080
+#define		TLAN_NET_CFG_MSMASK	0x007F
+#define TLAN_MAN_TEST			0x06
+#define TLAN_DEF_VENDOR_ID		0x08
+#define TLAN_DEF_DEVICE_ID		0x0A
+#define TLAN_DEF_REVISION		0x0C
+#define TLAN_DEF_SUBCLASS		0x0D
+#define TLAN_DEF_MIN_LAT		0x0E
+#define TLAN_DEF_MAX_LAT		0x0F
+#define TLAN_AREG_0			0x10
+#define TLAN_AREG_1			0x16
+#define TLAN_AREG_2			0x1C
+#define TLAN_AREG_3			0x22
+#define TLAN_HASH_1			0x28
+#define TLAN_HASH_2			0x2C
+#define TLAN_GOOD_TX_FRMS		0x30
+#define TLAN_TX_UNDERUNS		0x33
+#define TLAN_GOOD_RX_FRMS		0x34
+#define TLAN_RX_OVERRUNS		0x37
+#define TLAN_DEFERRED_TX		0x38
+#define TLAN_CRC_ERRORS			0x3A
+#define TLAN_CODE_ERRORS		0x3B
+#define TLAN_MULTICOL_FRMS		0x3C
+#define TLAN_SINGLECOL_FRMS		0x3E
+#define TLAN_EXCESSCOL_FRMS		0x40
+#define TLAN_LATE_COLS			0x41
+#define TLAN_CARRIER_LOSS		0x42
+#define TLAN_ACOMMIT			0x43
+#define TLAN_LED_REG			0x44
+#define		TLAN_LED_ACT		0x10
+#define		TLAN_LED_LINK		0x01
+#define TLAN_BSIZE_REG			0x45
+#define TLAN_MAX_RX			0x46
+#define TLAN_INT_DIS			0x48
+#define		TLAN_ID_TX_EOC		0x04
+#define		TLAN_ID_RX_EOF		0x02
+#define		TLAN_ID_RX_EOC		0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS	8
+
+#define TLAN_INT_NONE			0x0000
+#define TLAN_INT_TX_EOF			0x0001
+#define TLAN_INT_STAT_OVERFLOW		0x0002
+#define TLAN_INT_RX_EOF			0x0003
+#define TLAN_INT_DUMMY			0x0004
+#define TLAN_INT_TX_EOC			0x0005
+#define TLAN_INT_STATUS_CHECK		0x0006
+#define TLAN_INT_RX_EOC			0x0007
+
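+/* Illustrative sketch (assumed usage): the type field of the host
+ * interrupt register, bits 4:2 (TLAN_HI_IT_MASK), selects one of the
+ * eight codes above:
+ *
+ *	u16 host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ *	unsigned int type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ */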
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL			0x00
+#define		MII_GC_RESET		0x8000
+#define		MII_GC_LOOPBK		0x4000
+#define		MII_GC_SPEEDSEL		0x2000
+#define		MII_GC_AUTOENB		0x1000
+#define		MII_GC_PDOWN		0x0800
+#define		MII_GC_ISOLATE		0x0400
+#define		MII_GC_AUTORSRT		0x0200
+#define		MII_GC_DUPLEX		0x0100
+#define		MII_GC_COLTEST		0x0080
+#define		MII_GC_RESERVED		0x007F
+#define MII_GEN_STS			0x01
+#define		MII_GS_100BT4		0x8000
+#define		MII_GS_100BTXFD		0x4000
+#define		MII_GS_100BTXHD		0x2000
+#define		MII_GS_10BTFD		0x1000
+#define		MII_GS_10BTHD		0x0800
+#define		MII_GS_RESERVED		0x07C0
+#define		MII_GS_AUTOCMPLT	0x0020
+#define		MII_GS_RFLT		0x0010
+#define		MII_GS_AUTONEG		0x0008
+#define		MII_GS_LINK		0x0004
+#define		MII_GS_JABBER		0x0002
+#define		MII_GS_EXTCAP		0x0001
+#define MII_GEN_ID_HI			0x02
+#define MII_GEN_ID_LO			0x03
+#define		MII_GIL_OUI		0xFC00
+#define		MII_GIL_MODEL		0x03F0
+#define		MII_GIL_REVISION	0x000F
+#define MII_AN_ADV			0x04
+#define MII_AN_LPA			0x05
+#define MII_AN_EXP			0x06
+
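+/* Illustrative sketch (tlan_mii_read_reg is an assumed accessor name):
+ * completed autonegotiation with link up can be tested via the generic
+ * status register:
+ *
+ *	u16 sts;
+ *	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &sts);
+ *	if ((sts & MII_GS_LINK) && (sts & MII_GS_AUTOCMPLT))
+ *		... link up, autonegotiation complete ...
+ */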
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID			0x10
+#define TLAN_TLPHY_CTL			0x11
+#define		TLAN_TC_IGLINK		0x8000
+#define		TLAN_TC_SWAPOL		0x4000
+#define		TLAN_TC_AUISEL		0x2000
+#define		TLAN_TC_SQEEN		0x1000
+#define		TLAN_TC_MTEST		0x0800
+#define		TLAN_TC_RESERVED	0x07F8
+#define		TLAN_TC_NFEW		0x0004
+#define		TLAN_TC_INTEN		0x0002
+#define		TLAN_TC_TINT		0x0001
+#define TLAN_TLPHY_STS			0x12
+#define		TLAN_TS_MINT		0x8000
+#define		TLAN_TS_PHOK		0x4000
+#define		TLAN_TS_POLOK		0x2000
+#define		TLAN_TS_TPENERGY	0x1000
+#define		TLAN_TS_RESERVED	0x0FFF
+#define TLAN_TLPHY_PAR			0x19
+#define		TLAN_PHY_CIM_STAT	0x0020
+#define		TLAN_PHY_SPEED_100	0x0040
+#define		TLAN_PHY_DUPLEX_FULL	0x0080
+#define		TLAN_PHY_AN_EN_STAT     0x0400
+
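+/* Illustrative sketch (same assumed accessor as above): negotiated
+ * speed and duplex of the internal PHY can be read back from
+ * TLAN_TLPHY_PAR:
+ *
+ *	u16 par;
+ *	tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &par);
+ *	speed = (par & TLAN_PHY_SPEED_100) ? 100 : 10;
+ *	full_duplex = (par & TLAN_PHY_DUPLEX_FULL) != 0;
+ */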
+/* National Semiconductor and Level One PHY IDs */
+#define NAT_SEM_ID1			0x2000
+#define NAT_SEM_ID2			0x5C01
+#define LEVEL1_ID1			0x7810
+#define LEVEL1_ID2			0x0000
+
+/* Advance circular index a, wrapping to 0 at limit b. */
+#define CIRC_INC(a, b)	do { if (++(a) >= (b)) (a) = 0; } while (0)
+
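+/* Illustrative usage (TLAN_NUM_TX_LISTS is assumed to be defined
+ * earlier in this header): advance a TX ring index with wraparound:
+ *
+ *	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
+ */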
+/* Routines to access internal registers. */
+
+/* Select an internal register via TLAN_DIO_ADR, then access it through
+ * the TLAN_DIO_DATA window; the low address bits pick the byte lane.
+ */
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
+{
+	outw(internal_addr, base_addr + TLAN_DIO_ADR);
+	return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
+}
+
+
+
+
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
+{
+	outw(internal_addr, base_addr + TLAN_DIO_ADR);
+	return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
+}
+
+
+
+
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
+{
+	outw(internal_addr, base_addr + TLAN_DIO_ADR);
+	return inl(base_addr + TLAN_DIO_DATA);
+}
+
+
+
+
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
+{
+	outw(internal_addr, base_addr + TLAN_DIO_ADR);
+	outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+}
+
+
+
+
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
+{
+	outw(internal_addr, base_addr + TLAN_DIO_ADR);
+	outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+}
+
+
+
+
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
+{
+	outw(internal_addr, base_addr + TLAN_DIO_ADR);
+	outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+}
+
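+/* Illustrative usage of the accessors above: the LED register is a
+ * byte-wide internal register, so for example
+ *
+ *	tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+ *
+ * lights the link LED, and
+ *
+ *	tlan_dio_read8(dev->base_addr, TLAN_LED_REG)
+ *
+ * reads the current value back.
+ */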
+#define tlan_clear_bit(bit, port)	outb_p(inb_p(port) & ~(bit), port)
+#define tlan_get_bit(bit, port)		((int) (inb_p(port) & (bit)))
+#define tlan_set_bit(bit, port)		outb_p(inb_p(port) | (bit), port)
+
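+/* Illustrative sketch (sequence assumed): the MII management lines are
+ * bit-banged through TLAN_NET_SIO with the macros above; select the SIO
+ * register via DIO_ADR, then toggle clock/data on its DIO_DATA byte lane:
+ *
+ *	u16 sio;
+ *	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ *	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *	tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
+ *	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ *	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ */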
+/*
+ * Given 6 bytes (a 48-bit Ethernet address), view them as 8 6-bit
+ * numbers and return the XOR of those.  The code below is about seven
+ * times as fast as the original code.
+ *
+ * The original code was:
+ *
+ * u32	xor(u32 a, u32 b) {	return ((a && !b ) || (! a && b )); }
+ *
+ * #define XOR8(a, b, c, d, e, f, g, h)	\
+ *	xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit)		(( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
+ *
+ *	hash  = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ *		      DA(a,30), DA(a,36), DA(a,42));
+ *	hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ *		      DA(a,31), DA(a,37), DA(a,43)) << 1;
+ *	hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ *		      DA(a,32), DA(a,38), DA(a,44)) << 2;
+ *	hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ *		      DA(a,33), DA(a,39), DA(a,45)) << 3;
+ *	hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ *		      DA(a,34), DA(a,40), DA(a,46)) << 4;
+ *	hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ *		      DA(a,35), DA(a,41), DA(a,47)) << 5;
+ *
+ */
+static inline u32 tlan_hash_func(const u8 *a)
+{
+	u8	hash;
+
+	hash = (a[0]^a[3]);		/* & 077 */
+	hash ^= ((a[0]^a[3])>>6);	/* & 003 */
+	hash ^= ((a[1]^a[4])<<2);	/* & 074 */
+	hash ^= ((a[1]^a[4])>>4);	/* & 017 */
+	hash ^= ((a[2]^a[5])<<4);	/* & 060 */
+	hash ^= ((a[2]^a[5])>>2);	/* & 077 */
+
+	return hash & 077;
+}
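+/* Illustrative usage (ha is an assumed netdev_hw_addr iterator): the
+ * 6-bit result selects one bit across the two 32-bit hash registers
+ * (TLAN_HASH_1/TLAN_HASH_2) for multicast filtering, roughly:
+ *
+ *	u32 offset = tlan_hash_func(ha->addr);
+ *	if (offset < 32)
+ *		hash1 |= (1 << offset);
+ *	else
+ *		hash2 |= (1 << (offset - 32));
+ */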
+#endif