APM X-Gene SoC Ethernet drivers (drivers/net/ethernet/apm), Linux v4.19.13 snapshot, rendered as a diff that adds each file from scratch.
diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
new file mode 100644
index 0000000..59efe5b
--- /dev/null
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -0,0 +1,2 @@
+source "drivers/net/ethernet/apm/xgene/Kconfig"
+source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
new file mode 100644
index 0000000..946b2a4
--- /dev/null
+++ b/drivers/net/ethernet/apm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for APM X-GENE Ethernet driver.
+#
+
+obj-$(CONFIG_NET_XGENE) += xgene/
+obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644
index 0000000..eedd3f3
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -0,0 +1,10 @@
+config NET_XGENE_V2
+	tristate "APM X-Gene SoC Ethernet-v2 Driver"
+	depends on ARCH_XGENE || COMPILE_TEST
+	help
+	  This is the Ethernet driver for the on-chip Ethernet interface of
+	  APM X-Gene SoCs that uses a linked-list DMA descriptor
+	  architecture (v2).
+
+	  To compile this driver as a module, choose M here. This module will
+	  be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644
index 0000000..f16a2b3
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for APM X-Gene Ethernet v2 driver
+#
+
+xgene-enet-v2-objs := main.o mac.o enet.o ring.o mdio.o ethtool.o
+obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c
new file mode 100644
index 0000000..5998da0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.c
@@ -0,0 +1,85 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->resources.base_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
+{
+	void __iomem *addr = pdata->resources.base_addr + offset;
+
+	return ioread32(addr);
+}
+
+int xge_port_reset(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	u32 data, wait = 10;
+
+	xge_wr_csr(pdata, ENET_CLKEN, 0x3);
+	xge_wr_csr(pdata, ENET_SRST, 0xf);
+	xge_wr_csr(pdata, ENET_SRST, 0);
+	xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 1);
+	xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 0);
+
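+	/* wait for RAM init; BLOCK_MEM_RDY reads MEM_RDY (all ones) when done */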
+	do {
+		usleep_range(100, 110);
+		data = xge_rd_csr(pdata, BLOCK_MEM_RDY);
+	} while (data != MEM_RDY && wait--);
+
+	if (data != MEM_RDY) {
+		dev_err(dev, "ECC init failed: %x\n", data);
+		return -ETIMEDOUT;
+	}
+
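+	/* mark DMA reads/writes coherent (ARAUX/AWAUX attributes, assumed) */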
+	xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);
+
+	return 0;
+}
+
+static void xge_traffic_resume(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+
+	xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
+	xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);
+
+	xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
+	xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
+}
+
+void xge_port_init(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+
+	pdata->phy_speed = SPEED_1000;
+	xge_mac_init(pdata);
+	xge_traffic_resume(ndev);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h
new file mode 100644
index 0000000..3fd36dc
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.h
@@ -0,0 +1,45 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ENET_H__
+#define __XGENE_ENET_V2_ENET_H__
+
+#define ENET_CLKEN		0xc008
+#define ENET_SRST		0xc000
+#define ENET_SHIM		0xc010
+#define CFG_MEM_RAM_SHUTDOWN	0xd070
+#define BLOCK_MEM_RDY		0xd074
+
+#define MEM_RDY			0xffffffff
+#define DEVM_ARAUX_COH		BIT(19)
+#define DEVM_AWAUX_COH		BIT(3)
+
+#define CFG_FORCE_LINK_STATUS_EN	0x229c
+#define FORCE_LINK_STATUS		0x22a0
+#define CFG_LINK_AGGR_RESUME		0x27c8
+#define RX_DV_GATE_REG			0x2dfc
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val);
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset);
+int xge_port_reset(struct net_device *ndev);
+void xge_port_init(struct net_device *ndev);
+
+#endif  /* __XGENE_ENET_V2_ENET_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
new file mode 100644
index 0000000..d31ad82
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -0,0 +1,190 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+#define XGE_STAT(m)		{ #m, offsetof(struct xge_pdata, stats.m) }
+#define XGE_EXTD_STAT(m, n)					\
+	{							\
+		#m,						\
+		n,						\
+		0						\
+	}
+
+static const struct xge_gstrings_stats gstrings_stats[] = {
+	XGE_STAT(rx_packets),
+	XGE_STAT(tx_packets),
+	XGE_STAT(rx_bytes),
+	XGE_STAT(tx_bytes),
+	XGE_STAT(rx_errors)
+};
+
+static struct xge_gstrings_extd_stats gstrings_extd_stats[] = {
+	XGE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64),
+	XGE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127),
+	XGE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255),
+	XGE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511),
+	XGE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K),
+	XGE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX),
+	XGE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV),
+	XGE_EXTD_STAT(rx_fcs_error_cntr, RFCS),
+	XGE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA),
+	XGE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA),
+	XGE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF),
+	XGE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF),
+	XGE_EXTD_STAT(rx_unk_opcode_cntr, RXUO),
+	XGE_EXTD_STAT(rx_align_err_cntr, RALN),
+	XGE_EXTD_STAT(rx_frame_len_err_cntr, RFLR),
+	XGE_EXTD_STAT(rx_code_err_cntr, RCDE),
+	XGE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE),
+	XGE_EXTD_STAT(rx_undersize_pkt_cntr, RUND),
+	XGE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR),
+	XGE_EXTD_STAT(rx_fragments_cntr, RFRG),
+	XGE_EXTD_STAT(rx_jabber_cntr, RJBR),
+	XGE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP),
+	XGE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA),
+	XGE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA),
+	XGE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF),
+	XGE_EXTD_STAT(tx_defer_pkt_cntr, TDFR),
+	XGE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF),
+	XGE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL),
+	XGE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL),
+	XGE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL),
+	XGE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL),
+	XGE_EXTD_STAT(tx_total_col_cntr, TNCL),
+	XGE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH),
+	XGE_EXTD_STAT(tx_drop_frame_cntr, TDRP),
+	XGE_EXTD_STAT(tx_jabber_frame_cntr, TJBR),
+	XGE_EXTD_STAT(tx_fcs_error_cntr, TFCS),
+	XGE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF),
+	XGE_EXTD_STAT(tx_oversize_frame_cntr, TOVR),
+	XGE_EXTD_STAT(tx_undersize_frame_cntr, TUND),
+	XGE_EXTD_STAT(tx_fragments_cntr, TFRG)
+};
+
+#define XGE_STATS_LEN		ARRAY_SIZE(gstrings_stats)
+#define XGE_EXTD_STATS_LEN	ARRAY_SIZE(gstrings_extd_stats)
+
+static void xge_mac_get_extd_stats(struct xge_pdata *pdata)
+{
+	u32 data;
+	int i;
+
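+	/* counters are assumed clear-on-read; accumulate them into a running total */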
+	for (i = 0; i < XGE_EXTD_STATS_LEN; i++) {
+		data = xge_rd_csr(pdata, gstrings_extd_stats[i].addr);
+		gstrings_extd_stats[i].value += data;
+	}
+}
+
+static void xge_get_drvinfo(struct net_device *ndev,
+			    struct ethtool_drvinfo *info)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct platform_device *pdev = pdata->pdev;
+
+	strcpy(info->driver, "xgene-enet-v2");
+	strcpy(info->version, XGENE_ENET_V2_VERSION);
+	snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
+	sprintf(info->bus_info, "%s", pdev->name);
+}
+
+static void xge_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < XGE_STATS_LEN; i++) {
+		memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < XGE_EXTD_STATS_LEN; i++) {
+		memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+}
+
+static int xge_get_sset_count(struct net_device *ndev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EINVAL;
+
+	return XGE_STATS_LEN + XGE_EXTD_STATS_LEN;
+}
+
+static void xge_get_ethtool_stats(struct net_device *ndev,
+				  struct ethtool_stats *dummy,
+				  u64 *data)
+{
+	void *pdata = netdev_priv(ndev);
+	int i;
+
+	for (i = 0; i < XGE_STATS_LEN; i++)
+		*data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+
+	xge_mac_get_extd_stats(pdata);
+
+	for (i = 0; i < XGE_EXTD_STATS_LEN; i++)
+		*data++ = gstrings_extd_stats[i].value;
+}
+
+static int xge_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	phy_ethtool_ksettings_get(phydev, cmd);
+
+	return 0;
+}
+
+static int xge_set_link_ksettings(struct net_device *ndev,
+				  const struct ethtool_link_ksettings *cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_ksettings_set(phydev, cmd);
+}
+
+static const struct ethtool_ops xge_ethtool_ops = {
+	.get_drvinfo = xge_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_strings = xge_get_strings,
+	.get_sset_count = xge_get_sset_count,
+	.get_ethtool_stats = xge_get_ethtool_stats,
+	.get_link_ksettings = xge_get_link_ksettings,
+	.set_link_ksettings = xge_set_link_ksettings,
+};
+
+void xge_set_ethtool_ops(struct net_device *ndev)
+{
+	ndev->ethtool_ops = &xge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.h b/drivers/net/ethernet/apm/xgene-v2/ethtool.h
new file mode 100644
index 0000000..54b48d5
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.h
@@ -0,0 +1,78 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ETHTOOL_H__
+#define __XGENE_ENET_V2_ETHTOOL_H__
+
+struct xge_gstrings_stats {
+	char name[ETH_GSTRING_LEN];
+	int offset;
+};
+
+struct xge_gstrings_extd_stats {
+	char name[ETH_GSTRING_LEN];
+	u32 addr;
+	u32 value;
+};
+
+#define TR64			0xa080
+#define TR127			0xa084
+#define TR255			0xa088
+#define TR511			0xa08c
+#define TR1K			0xa090
+#define TRMAX			0xa094
+#define TRMGV			0xa098
+#define RFCS			0xa0a4
+#define RMCA			0xa0a8
+#define RBCA			0xa0ac
+#define RXCF			0xa0b0
+#define RXPF			0xa0b4
+#define RXUO			0xa0b8
+#define RALN			0xa0bc
+#define RFLR			0xa0c0
+#define RCDE			0xa0c4
+#define RCSE			0xa0c8
+#define RUND			0xa0cc
+#define ROVR			0xa0d0
+#define RFRG			0xa0d4
+#define RJBR			0xa0d8
+#define RDRP			0xa0dc
+#define TMCA			0xa0e8
+#define TBCA			0xa0ec
+#define TXPF			0xa0f0
+#define TDFR			0xa0f4
+#define TEDF			0xa0f8
+#define TSCL			0xa0fc
+#define TMCL			0xa100
+#define TLCL			0xa104
+#define TXCL			0xa108
+#define TNCL			0xa10c
+#define TPFH			0xa110
+#define TDRP			0xa114
+#define TJBR			0xa118
+#define TFCS			0xa11c
+#define TXCF			0xa120
+#define TOVR			0xa124
+#define TUND			0xa128
+#define TFRG			0xa12c
+
+void xge_set_ethtool_ops(struct net_device *ndev);
+
+#endif  /* __XGENE_ENET_V2_ETHTOOL_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
new file mode 100644
index 0000000..ee431e3
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -0,0 +1,118 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_mac_reset(struct xge_pdata *pdata)
+{
+	xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET);
+	xge_wr_csr(pdata, MAC_CONFIG_1, 0);
+}
+
+void xge_mac_set_speed(struct xge_pdata *pdata)
+{
+	u32 icm0, icm2, ecm0, mc2;
+	u32 intf_ctrl, rgmii;
+
+	icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0);
+	icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0);
+	ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0);
+	rgmii = xge_rd_csr(pdata, RGMII_REG_0);
+	mc2 = xge_rd_csr(pdata, MAC_CONFIG_2);
+	intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL);
+	icm2 |= CFG_WAITASYNCRD_EN;
+
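+	/* INTF_MODE: 1 = MII (10/100), 2 = GMII (1000) */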
+	switch (pdata->phy_speed) {
+	case SPEED_10:
+		SET_REG_BITS(&mc2, INTF_MODE, 1);
+		SET_REG_BITS(&intf_ctrl, HD_MODE, 0);
+		SET_REG_BITS(&icm0, CFG_MACMODE, 0);
+		SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500);
+		SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+		break;
+	case SPEED_100:
+		SET_REG_BITS(&mc2, INTF_MODE, 1);
+		SET_REG_BITS(&intf_ctrl, HD_MODE, 1);
+		SET_REG_BITS(&icm0, CFG_MACMODE, 1);
+		SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80);
+		SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+		break;
+	default:
+		SET_REG_BITS(&mc2, INTF_MODE, 2);
+		SET_REG_BITS(&intf_ctrl, HD_MODE, 2);
+		SET_REG_BITS(&icm0, CFG_MACMODE, 2);
+		SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16);
+		SET_REG_BIT(&rgmii, CFG_SPEED_125, 1);
+		break;
+	}
+
+	mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC;
+	SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32);
+
+	xge_wr_csr(pdata, MAC_CONFIG_2, mc2);
+	xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl);
+	xge_wr_csr(pdata, RGMII_REG_0, rgmii);
+	xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0);
+	xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2);
+	xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0);
+}
+
+void xge_mac_set_station_addr(struct xge_pdata *pdata)
+{
+	u8 *dev_addr = pdata->ndev->dev_addr;
+	u32 addr0, addr1;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+	xge_wr_csr(pdata, STATION_ADDR0, addr0);
+	xge_wr_csr(pdata, STATION_ADDR1, addr1);
+}
+
+void xge_mac_init(struct xge_pdata *pdata)
+{
+	xge_mac_reset(pdata);
+	xge_mac_set_speed(pdata);
+	xge_mac_set_station_addr(pdata);
+}
+
+void xge_mac_enable(struct xge_pdata *pdata)
+{
+	u32 data;
+
+	data = xge_rd_csr(pdata, MAC_CONFIG_1);
+	data |= TX_EN | RX_EN;
+	xge_wr_csr(pdata, MAC_CONFIG_1, data);
+
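+	/* read back, presumably to flush the posted write; value is unused */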
+	data = xge_rd_csr(pdata, MAC_CONFIG_1);
+}
+
+void xge_mac_disable(struct xge_pdata *pdata)
+{
+	u32 data;
+
+	data = xge_rd_csr(pdata, MAC_CONFIG_1);
+	data &= ~(TX_EN | RX_EN);
+	xge_wr_csr(pdata, MAC_CONFIG_1, data);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h
new file mode 100644
index 0000000..3c83fa6
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.h
@@ -0,0 +1,108 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAC_H__
+#define __XGENE_ENET_V2_MAC_H__
+
+/* Register offsets */
+#define MAC_CONFIG_1		0xa000
+#define MAC_CONFIG_2		0xa004
+#define MII_MGMT_CONFIG		0xa020
+#define MII_MGMT_COMMAND	0xa024
+#define MII_MGMT_ADDRESS	0xa028
+#define MII_MGMT_CONTROL	0xa02c
+#define MII_MGMT_STATUS		0xa030
+#define MII_MGMT_INDICATORS	0xa034
+#define INTERFACE_CONTROL	0xa038
+#define STATION_ADDR0		0xa040
+#define STATION_ADDR1		0xa044
+
+#define RGMII_REG_0		0x27e0
+#define ICM_CONFIG0_REG_0	0x2c00
+#define ICM_CONFIG2_REG_0	0x2c08
+#define ECM_CONFIG0_REG_0	0x2d00
+
+/* Register fields */
+#define SOFT_RESET		BIT(31)
+#define TX_EN			BIT(0)
+#define RX_EN			BIT(2)
+#define PAD_CRC			BIT(2)
+#define CRC_EN			BIT(1)
+#define FULL_DUPLEX		BIT(0)
+
+#define INTF_MODE_POS		8
+#define INTF_MODE_LEN		2
+#define HD_MODE_POS		25
+#define HD_MODE_LEN		2
+#define CFG_MACMODE_POS		18
+#define CFG_MACMODE_LEN		2
+#define CFG_WAITASYNCRD_POS	0
+#define CFG_WAITASYNCRD_LEN	16
+#define CFG_SPEED_125_POS	24
+#define CFG_WFIFOFULLTHR_POS	0
+#define CFG_WFIFOFULLTHR_LEN	7
+#define MGMT_CLOCK_SEL_POS	0
+#define MGMT_CLOCK_SEL_LEN	3
+#define PHY_ADDR_POS		8
+#define PHY_ADDR_LEN		5
+#define REG_ADDR_POS		0
+#define REG_ADDR_LEN		5
+#define MII_MGMT_BUSY		BIT(0)
+#define MII_READ_CYCLE		BIT(0)
+#define CFG_WAITASYNCRD_EN	BIT(16)
+
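+/* read-modify-write and extract helpers for len-bit fields at position pos */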
+static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val)
+{
+	u32 mask = GENMASK(pos + len - 1, pos);
+
+	*var &= ~mask;
+	*var |= ((val << pos) & mask);
+}
+
+static inline u32 xgene_get_reg_bits(u32 var, int pos, int len)
+{
+	u32 mask = GENMASK(pos + len - 1, pos);
+
+	return (var & mask) >> pos;
+}
+
+#define SET_REG_BITS(var, field, val)					\
+	xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val)
+
+#define SET_REG_BIT(var, field, val)					\
+	xgene_set_reg_bits(var, field ## _POS, 1, val)
+
+#define GET_REG_BITS(var, field)					\
+	xgene_get_reg_bits(var, field ## _POS, field ## _LEN)
+
+#define GET_REG_BIT(var, field)		((var) & (field))
+
+struct xge_pdata;
+
+void xge_mac_reset(struct xge_pdata *pdata);
+void xge_mac_set_speed(struct xge_pdata *pdata);
+void xge_mac_enable(struct xge_pdata *pdata);
+void xge_mac_disable(struct xge_pdata *pdata);
+void xge_mac_init(struct xge_pdata *pdata);
+void xge_mac_set_station_addr(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_MAC_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
new file mode 100644
index 0000000..0f2ad50
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -0,0 +1,767 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static const struct acpi_device_id xge_acpi_match[];
+
+static int xge_get_resources(struct xge_pdata *pdata)
+{
+	struct platform_device *pdev;
+	struct net_device *ndev;
+	int phy_mode, ret = 0;
+	struct resource *res;
+	struct device *dev;
+
+	pdev = pdata->pdev;
+	dev = &pdev->dev;
+	ndev = pdata->ndev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Resource enet_csr not defined\n");
+		return -ENODEV;
+	}
+
+	pdata->resources.base_addr = devm_ioremap(dev, res->start,
+						  resource_size(res));
+	if (!pdata->resources.base_addr) {
+		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+		return -ENOMEM;
+	}
+
+	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+		eth_hw_addr_random(ndev);
+
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	phy_mode = device_get_phy_mode(dev);
+	if (phy_mode < 0) {
+		dev_err(dev, "Unable to get phy-connection-type\n");
+		return phy_mode;
+	}
+	pdata->resources.phy_mode = phy_mode;
+
+	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
+		dev_err(dev, "Incorrect phy-connection-type specified\n");
+		return -ENODEV;
+	}
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(dev, "Unable to get irq\n");
+		return ret;
+	}
+	pdata->resources.irq = ret;
+
+	return 0;
+}
+
+static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct xge_desc_ring *ring = pdata->rx_ring;
+	const u8 slots = XGENE_ENET_NUM_DESC - 1;
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_raw_desc *raw_desc;
+	u64 addr_lo, addr_hi;
+	u8 tail = ring->tail;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	u16 len;
+	int i;
+
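+	/* post nbuf empty RX buffers to the ring, starting at the tail */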
+	for (i = 0; i < nbuf; i++) {
+		raw_desc = &ring->raw_desc[tail];
+
+		len = XGENE_ENET_STD_MTU;
+		skb = netdev_alloc_skb(ndev, len);
+		if (unlikely(!skb))
+			return -ENOMEM;
+
+		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			netdev_err(ndev, "DMA mapping error\n");
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		ring->pkt_info[tail].skb = skb;
+		ring->pkt_info[tail].dma_addr = dma_addr;
+
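+		/* keep the next-descriptor link in m1; update only the buffer address */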
+		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+					   SET_BITS(PKT_ADDRH,
+						    upper_32_bits(dma_addr)));
+
+		dma_wmb();
+		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+					   SET_BITS(E, 1));
+		tail = (tail + 1) & slots;
+	}
+
+	ring->tail = tail;
+
+	return 0;
+}
+
+static int xge_init_hw(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	ret = xge_port_reset(ndev);
+	if (ret)
+		return ret;
+
+	xge_port_init(ndev);
+	pdata->nbufs = NUM_BUFS;
+
+	return 0;
+}
+
+static irqreturn_t xge_irq(const int irq, void *data)
+{
+	struct xge_pdata *pdata = data;
+
+	if (napi_schedule_prep(&pdata->napi)) {
+		xge_intr_disable(pdata);
+		__napi_schedule(&pdata->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int xge_request_irq(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
+
+	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
+			  pdata);
+	if (ret)
+		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
+
+	return ret;
+}
+
+static void xge_free_irq(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+
+	free_irq(pdata->resources.irq, pdata);
+}
+
+static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
+{
+	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
+		return true;
+
+	return false;
+}
+
+static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_desc_ring *tx_ring;
+	struct xge_raw_desc *raw_desc;
+	dma_addr_t dma_addr;
+	u64 addr_lo, addr_hi;
+	void *pkt_buf;
+	u8 tail;
+	u16 len;
+
+	tx_ring = pdata->tx_ring;
+	tail = tx_ring->tail;
+	len = skb_headlen(skb);
+	raw_desc = &tx_ring->raw_desc[tail];
+
+	if (!is_tx_slot_available(raw_desc)) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
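+	/* TX copies the skb into a coherent bounce buffer instead of mapping it */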
+	/* Packet buffers should be 64B aligned */
+	pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+				      GFP_ATOMIC);
+	if (unlikely(!pkt_buf)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	memcpy(pkt_buf, skb->data, len);
+
+	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+				   SET_BITS(PKT_ADDRH,
+					    upper_32_bits(dma_addr)));
+
+	tx_ring->pkt_info[tail].skb = skb;
+	tx_ring->pkt_info[tail].dma_addr = dma_addr;
+	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
+
+	dma_wmb();
+
+	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+				   SET_BITS(PKT_SIZE, len) |
+				   SET_BITS(E, 0));
+	skb_tx_timestamp(skb);
+	xge_wr_csr(pdata, DMATXCTRL, 1);
+
+	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
+
+	return NETDEV_TX_OK;
+}
+
+static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
+{
+	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
+		return true;
+
+	return false;
+}
+
+static void xge_txc_poll(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_desc_ring *tx_ring;
+	struct xge_raw_desc *raw_desc;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	void *pkt_buf;
+	u32 data;
+	u8 head;
+
+	tx_ring = pdata->tx_ring;
+	head = tx_ring->head;
+
+	data = xge_rd_csr(pdata, DMATXSTATUS);
+	if (!GET_BITS(TXPKTCOUNT, data))
+		return;
+
+	while (1) {
+		raw_desc = &tx_ring->raw_desc[head];
+
+		if (!is_tx_hw_done(raw_desc))
+			break;
+
+		dma_rmb();
+
+		skb = tx_ring->pkt_info[head].skb;
+		dma_addr = tx_ring->pkt_info[head].dma_addr;
+		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
+		pdata->stats.tx_packets++;
+		pdata->stats.tx_bytes += skb->len;
+		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+		dev_kfree_skb_any(skb);
+
+		/* clear pktstart address and pktsize */
+		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
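+		/* ack one TX completion; the write is assumed to decrement TXPKTCOUNT */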
+		xge_wr_csr(pdata, DMATXSTATUS, 1);
+
+		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+	}
+
+	if (netif_queue_stopped(ndev))
+		netif_wake_queue(ndev);
+
+	tx_ring->head = head;
+}
+
+static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_desc_ring *rx_ring;
+	struct xge_raw_desc *raw_desc;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	int processed = 0;
+	u8 head, rx_error;
+	int i, ret;
+	u32 data;
+	u16 len;
+
+	rx_ring = pdata->rx_ring;
+	head = rx_ring->head;
+
+	data = xge_rd_csr(pdata, DMARXSTATUS);
+	if (!GET_BITS(RXPKTCOUNT, data))
+		return 0;
+
+	for (i = 0; i < budget; i++) {
+		raw_desc = &rx_ring->raw_desc[head];
+
+		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+			break;
+
+		dma_rmb();
+
+		skb = rx_ring->pkt_info[head].skb;
+		rx_ring->pkt_info[head].skb = NULL;
+		dma_addr = rx_ring->pkt_info[head].dma_addr;
+		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
+		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+				 DMA_FROM_DEVICE);
+
+		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
+		if (unlikely(rx_error)) {
+			pdata->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto out;
+		}
+
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		pdata->stats.rx_packets++;
+		pdata->stats.rx_bytes += len;
+		napi_gro_receive(&pdata->napi, skb);
+out:
+		ret = xge_refill_buffers(ndev, 1);
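+		/* ack one received packet and re-arm RX DMA */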
+		xge_wr_csr(pdata, DMARXSTATUS, 1);
+		xge_wr_csr(pdata, DMARXCTRL, 1);
+
+		if (ret)
+			break;
+
+		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+		processed++;
+	}
+
+	rx_ring->head = head;
+
+	return processed;
+}
+
+static void xge_delete_desc_ring(struct net_device *ndev,
+				 struct xge_desc_ring *ring)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	u16 size;
+
+	if (!ring)
+		return;
+
+	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+	if (ring->desc_addr)
+		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
+
+	kfree(ring->pkt_info);
+	kfree(ring);
+}
+
+static void xge_free_buffers(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct xge_desc_ring *ring = pdata->rx_ring;
+	struct device *dev = &pdata->pdev->dev;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	int i;
+
+	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+		skb = ring->pkt_info[i].skb;
+		dma_addr = ring->pkt_info[i].dma_addr;
+
+		if (!skb)
+			continue;
+
+		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void xge_delete_desc_rings(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+
+	xge_txc_poll(ndev);
+	xge_delete_desc_ring(ndev, pdata->tx_ring);
+
+	xge_rx_poll(ndev, 64);
+	xge_free_buffers(ndev);
+	xge_delete_desc_ring(ndev, pdata->rx_ring);
+}
+
+static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_desc_ring *ring;
+	u16 size;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	ring->ndev = ndev;
+
+	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
+					      GFP_KERNEL);
+	if (!ring->desc_addr)
+		goto err;
+
+	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
+				 GFP_KERNEL);
+	if (!ring->pkt_info)
+		goto err;
+
+	xge_setup_desc(ring);
+
+	return ring;
+
+err:
+	xge_delete_desc_ring(ndev, ring);
+
+	return NULL;
+}
+
+static int xge_create_desc_rings(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct xge_desc_ring *ring;
+	int ret;
+
+	/* create tx ring */
+	ring = xge_create_desc_ring(ndev);
+	if (!ring)
+		goto err;
+
+	pdata->tx_ring = ring;
+	xge_update_tx_desc_addr(pdata);
+
+	/* create rx ring */
+	ring = xge_create_desc_ring(ndev);
+	if (!ring)
+		goto err;
+
+	pdata->rx_ring = ring;
+	xge_update_rx_desc_addr(pdata);
+
+	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	xge_delete_desc_rings(ndev);
+
+	return -ENOMEM;
+}
+
+static int xge_open(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	ret = xge_create_desc_rings(ndev);
+	if (ret)
+		return ret;
+
+	napi_enable(&pdata->napi);
+	ret = xge_request_irq(ndev);
+	if (ret)
+		return ret;
+
+	xge_intr_enable(pdata);
+	xge_wr_csr(pdata, DMARXCTRL, 1);
+
+	phy_start(ndev->phydev);
+	xge_mac_enable(pdata);
+	netif_start_queue(ndev);
+
+	return 0;
+}
+
+static int xge_close(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	xge_mac_disable(pdata);
+	phy_stop(ndev->phydev);
+
+	xge_intr_disable(pdata);
+	xge_free_irq(ndev);
+	napi_disable(&pdata->napi);
+	xge_delete_desc_rings(ndev);
+
+	return 0;
+}
+
+static int xge_napi(struct napi_struct *napi, const int budget)
+{
+	struct net_device *ndev = napi->dev;
+	struct xge_pdata *pdata;
+	int processed;
+
+	pdata = netdev_priv(ndev);
+
+	xge_txc_poll(ndev);
+	processed = xge_rx_poll(ndev, budget);
+
+	if (processed < budget) {
+		napi_complete_done(napi, processed);
+		xge_intr_enable(pdata);
+	}
+
+	return processed;
+}
+
+static int xge_set_mac_addr(struct net_device *ndev, void *addr)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	ret = eth_mac_addr(ndev, addr);
+	if (ret)
+		return ret;
+
+	xge_mac_set_station_addr(pdata);
+
+	return 0;
+}
+
+static bool is_tx_pending(struct xge_raw_desc *raw_desc)
+{
+	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+		return true;
+
+	return false;
+}
+
+static void xge_free_pending_skb(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_desc_ring *tx_ring;
+	struct xge_raw_desc *raw_desc;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	void *pkt_buf;
+	int i;
+
+	tx_ring = pdata->tx_ring;
+
+	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+		raw_desc = &tx_ring->raw_desc[i];
+
+		if (!is_tx_pending(raw_desc))
+			continue;
+
+		skb = tx_ring->pkt_info[i].skb;
+		dma_addr = tx_ring->pkt_info[i].dma_addr;
+		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
+		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void xge_timeout(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+
+	rtnl_lock();
+
+	if (!netif_running(ndev))
+		goto out;
+
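+	/* quiesce TX, reclaim pending buffers, then rebuild the TX ring and MAC */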
+	netif_stop_queue(ndev);
+	xge_intr_disable(pdata);
+	napi_disable(&pdata->napi);
+
+	xge_wr_csr(pdata, DMATXCTRL, 0);
+	xge_txc_poll(ndev);
+	xge_free_pending_skb(ndev);
+	xge_wr_csr(pdata, DMATXSTATUS, ~0U);
+
+	xge_setup_desc(pdata->tx_ring);
+	xge_update_tx_desc_addr(pdata);
+	xge_mac_init(pdata);
+
+	napi_enable(&pdata->napi);
+	xge_intr_enable(pdata);
+	xge_mac_enable(pdata);
+	netif_start_queue(ndev);
+
+out:
+	rtnl_unlock();
+}
+
+static void xge_get_stats64(struct net_device *ndev,
+			    struct rtnl_link_stats64 *storage)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct xge_stats *stats = &pdata->stats;
+
+	storage->tx_packets += stats->tx_packets;
+	storage->tx_bytes += stats->tx_bytes;
+
+	storage->rx_packets += stats->rx_packets;
+	storage->rx_bytes += stats->rx_bytes;
+	storage->rx_errors += stats->rx_errors;
+}
+
+static const struct net_device_ops xgene_ndev_ops = {
+	.ndo_open = xge_open,
+	.ndo_stop = xge_close,
+	.ndo_start_xmit = xge_start_xmit,
+	.ndo_set_mac_address = xge_set_mac_addr,
+	.ndo_tx_timeout = xge_timeout,
+	.ndo_get_stats64 = xge_get_stats64,
+};
+
+static int xge_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *ndev;
+	struct xge_pdata *pdata;
+	int ret;
+
+	ndev = alloc_etherdev(sizeof(*pdata));
+	if (!ndev)
+		return -ENOMEM;
+
+	pdata = netdev_priv(ndev);
+
+	pdata->pdev = pdev;
+	pdata->ndev = ndev;
+	SET_NETDEV_DEV(ndev, dev);
+	platform_set_drvdata(pdev, pdata);
+	ndev->netdev_ops = &xgene_ndev_ops;
+
+	ndev->features |= NETIF_F_GSO |
+			  NETIF_F_GRO;
+
+	ret = xge_get_resources(pdata);
+	if (ret)
+		goto err;
+
+	ndev->hw_features = ndev->features;
+	xge_set_ethtool_ops(ndev);
+
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret) {
+		netdev_err(ndev, "No usable DMA configuration\n");
+		goto err;
+	}
+
+	ret = xge_init_hw(ndev);
+	if (ret)
+		goto err;
+
+	ret = xge_mdio_config(ndev);
+	if (ret)
+		goto err;
+
+	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		netdev_err(ndev, "Failed to register netdev\n");
+		goto err_mdio_remove;
+	}
+
+	return 0;
+
+err_mdio_remove:
+	xge_mdio_remove(ndev);
+err:
+	free_netdev(ndev);
+
+	return ret;
+}
+
+static int xge_remove(struct platform_device *pdev)
+{
+	struct xge_pdata *pdata;
+	struct net_device *ndev;
+
+	pdata = platform_get_drvdata(pdev);
+	ndev = pdata->ndev;
+
+	rtnl_lock();
+	if (netif_running(ndev))
+		dev_close(ndev);
+	rtnl_unlock();
+
+	xge_mdio_remove(ndev);
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static void xge_shutdown(struct platform_device *pdev)
+{
+	struct xge_pdata *pdata;
+
+	pdata = platform_get_drvdata(pdev);
+	if (!pdata)
+		return;
+
+	if (!pdata->ndev)
+		return;
+
+	xge_remove(pdev);
+}
+
+static const struct acpi_device_id xge_acpi_match[] = {
+	{ "APMC0D80" },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
+
+static struct platform_driver xge_driver = {
+	.driver = {
+		   .name = "xgene-enet-v2",
+		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
+	},
+	.probe = xge_probe,
+	.remove = xge_remove,
+	.shutdown = xge_shutdown,
+};
+module_platform_driver(xge_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_VERSION(XGENE_ENET_V2_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
new file mode 100644
index 0000000..969b258
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -0,0 +1,81 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAIN_H__
+#define __XGENE_ENET_V2_MAIN_H__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/prefetch.h>
+#include <linux/phy.h>
+#include <net/ip.h>
+#include "mac.h"
+#include "enet.h"
+#include "ring.h"
+#include "ethtool.h"
+
+#define XGENE_ENET_V2_VERSION	"v1.0"
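+/* buffer size used for RX skbs and TX bounce buffers, not the interface MTU */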
+#define XGENE_ENET_STD_MTU	1536
+#define XGENE_ENET_MIN_FRAME	60
+#define IRQ_ID_SIZE		16
+
+struct xge_resource {
+	void __iomem *base_addr;
+	int phy_mode;
+	u32 irq;
+};
+
+struct xge_stats {
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_errors;
+};
+
+/* ethernet private data */
+struct xge_pdata {
+	struct xge_resource resources;
+	struct xge_desc_ring *tx_ring;
+	struct xge_desc_ring *rx_ring;
+	struct platform_device *pdev;
+	char irq_name[IRQ_ID_SIZE];
+	struct mii_bus *mdio_bus;
+	struct net_device *ndev;
+	struct napi_struct napi;
+	struct xge_stats stats;
+	int phy_speed;
+	u8 nbufs;
+};
+
+int xge_mdio_config(struct net_device *ndev);
+void xge_mdio_remove(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
new file mode 100644
index 0000000..f5fe3bb
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -0,0 +1,170 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static int xge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+{
+	struct xge_pdata *pdata = bus->priv;
+	u32 done, val = 0;
+	u8 wait = 10;
+
+	SET_REG_BITS(&val, PHY_ADDR, phy_id);
+	SET_REG_BITS(&val, REG_ADDR, reg);
+	xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+	xge_wr_csr(pdata, MII_MGMT_CONTROL, data);
+	do {
+		usleep_range(5, 10);
+		done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+	} while ((done & MII_MGMT_BUSY) && wait--);
+
+	if (done & MII_MGMT_BUSY) {
+		dev_err(&bus->dev, "MII_MGMT write failed\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int xge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+	struct xge_pdata *pdata = bus->priv;
+	u32 data, done, val = 0;
+	u8 wait = 10;
+
+	SET_REG_BITS(&val, PHY_ADDR, phy_id);
+	SET_REG_BITS(&val, REG_ADDR, reg);
+	xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+	xge_wr_csr(pdata, MII_MGMT_COMMAND, MII_READ_CYCLE);
+	do {
+		usleep_range(5, 10);
+		done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+	} while ((done & MII_MGMT_BUSY) && wait--);
+
+	if (done & MII_MGMT_BUSY) {
+		dev_err(&bus->dev, "MII_MGMT read failed\n");
+		return -ETIMEDOUT;
+	}
+
+	data = xge_rd_csr(pdata, MII_MGMT_STATUS);
+	xge_wr_csr(pdata, MII_MGMT_COMMAND, 0);
+
+	return data;
+}
+
+static void xge_adjust_link(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+
+	if (phydev->link) {
+		if (pdata->phy_speed != phydev->speed) {
+			pdata->phy_speed = phydev->speed;
+			xge_mac_set_speed(pdata);
+			xge_mac_enable(pdata);
+			phy_print_status(phydev);
+		}
+	} else {
+		if (pdata->phy_speed != SPEED_UNKNOWN) {
+			pdata->phy_speed = SPEED_UNKNOWN;
+			xge_mac_disable(pdata);
+			phy_print_status(phydev);
+		}
+	}
+}
+
+void xge_mdio_remove(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct mii_bus *mdio_bus = pdata->mdio_bus;
+
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
+
+	if (mdio_bus->state == MDIOBUS_REGISTERED)
+		mdiobus_unregister(mdio_bus);
+
+	mdiobus_free(mdio_bus);
+}
+
+int xge_mdio_config(struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct mii_bus *mdio_bus;
+	struct phy_device *phydev;
+	int ret;
+
+	mdio_bus = mdiobus_alloc();
+	if (!mdio_bus)
+		return -ENOMEM;
+
+	mdio_bus->name = "APM X-Gene Ethernet (v2) MDIO Bus";
+	mdio_bus->read = xge_mdio_read;
+	mdio_bus->write = xge_mdio_write;
+	mdio_bus->priv = pdata;
+	mdio_bus->parent = dev;
+	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+	pdata->mdio_bus = mdio_bus;
+
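+	/* bits set in phy_mask are skipped during the scan; don't probe address 0 */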
+	mdio_bus->phy_mask = 0x1;
+	ret = mdiobus_register(mdio_bus);
+	if (ret)
+		goto err;
+
+	phydev = phy_find_first(mdio_bus);
+	if (!phydev) {
+		dev_err(dev, "no PHY found\n");
+		ret = -ENODEV;
+		goto err;
+	}
+	phydev = phy_connect(ndev, phydev_name(phydev),
+			     &xge_adjust_link,
+			     pdata->resources.phy_mode);
+
+	if (IS_ERR(phydev)) {
+		netdev_err(ndev, "Could not attach to PHY\n");
+		ret = PTR_ERR(phydev);
+		goto err;
+	}
+
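+	/* drop everything below 1000BASE-T full duplex from the PHY's mode list */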
+	phydev->supported &= ~(SUPPORTED_10baseT_Half |
+			       SUPPORTED_10baseT_Full |
+			       SUPPORTED_100baseT_Half |
+			       SUPPORTED_100baseT_Full |
+			       SUPPORTED_1000baseT_Half |
+			       SUPPORTED_AUI |
+			       SUPPORTED_MII |
+			       SUPPORTED_FIBRE |
+			       SUPPORTED_BNC);
+	phydev->advertising = phydev->supported;
+	pdata->phy_speed = SPEED_UNKNOWN;
+
+	return 0;
+err:
+	xge_mdio_remove(ndev);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c
new file mode 100644
index 0000000..3881082
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.c
@@ -0,0 +1,82 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+/* create circular linked list of descriptors */
+void xge_setup_desc(struct xge_desc_ring *ring)
+{
+	struct xge_raw_desc *raw_desc;
+	dma_addr_t dma_h, next_dma;
+	u16 offset;
+	int i;
+
+	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+		raw_desc = &ring->raw_desc[i];
+
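+		/* the mask wraps the last descriptor's link back to descriptor 0 */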
+		offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
+		next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
+
+		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
+		dma_h = upper_32_bits(next_dma);
+		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
+					   SET_BITS(NEXT_DESC_ADDRH, dma_h));
+	}
+}
+
+void xge_update_tx_desc_addr(struct xge_pdata *pdata)
+{
+	struct xge_desc_ring *ring = pdata->tx_ring;
+	dma_addr_t dma_addr = ring->dma_addr;
+
+	xge_wr_csr(pdata, DMATXDESCL, dma_addr);
+	xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));
+
+	ring->head = 0;
+	ring->tail = 0;
+}
+
+void xge_update_rx_desc_addr(struct xge_pdata *pdata)
+{
+	struct xge_desc_ring *ring = pdata->rx_ring;
+	dma_addr_t dma_addr = ring->dma_addr;
+
+	xge_wr_csr(pdata, DMARXDESCL, dma_addr);
+	xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));
+
+	ring->head = 0;
+	ring->tail = 0;
+}
+
+void xge_intr_enable(struct xge_pdata *pdata)
+{
+	u32 data;
+
+	data = RX_PKT_RCVD | TX_PKT_SENT;
+	xge_wr_csr(pdata, DMAINTRMASK, data);
+}
+
+void xge_intr_disable(struct xge_pdata *pdata)
+{
+	xge_wr_csr(pdata, DMAINTRMASK, 0);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h
new file mode 100644
index 0000000..abc8c9a
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.h
@@ -0,0 +1,121 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *	      Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_RING_H__
+#define __XGENE_ENET_V2_RING_H__
+
+#define XGENE_ENET_DESC_SIZE	64
+#define XGENE_ENET_NUM_DESC	256
+#define NUM_BUFS		8
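+/* PKT_SIZE sentinel (max 12-bit value) marking a reclaimed TX slot */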
+#define SLOT_EMPTY		0xfff
+
+#define DMATXCTRL		0xa180
+#define DMATXDESCL		0xa184
+#define DMATXDESCH		0xa1a0
+#define DMATXSTATUS		0xa188
+#define DMARXCTRL		0xa18c
+#define DMARXDESCL		0xa190
+#define DMARXDESCH		0xa1a4
+#define DMARXSTATUS		0xa194
+#define DMAINTRMASK		0xa198
+#define DMAINTERRUPT		0xa19c
+
+#define D_POS			62
+#define D_LEN			2
+#define E_POS			63
+#define E_LEN			1
+#define PKT_ADDRL_POS		0
+#define PKT_ADDRL_LEN		32
+#define PKT_ADDRH_POS		32
+#define PKT_ADDRH_LEN		10
+#define PKT_SIZE_POS		32
+#define PKT_SIZE_LEN		12
+#define NEXT_DESC_ADDRL_POS	0
+#define NEXT_DESC_ADDRL_LEN	32
+#define NEXT_DESC_ADDRH_POS	48
+#define NEXT_DESC_ADDRH_LEN	10
+
+#define TXPKTCOUNT_POS		16
+#define TXPKTCOUNT_LEN		8
+#define RXPKTCOUNT_POS		16
+#define RXPKTCOUNT_LEN		8
+
+#define TX_PKT_SENT		BIT(0)
+#define TX_BUS_ERROR		BIT(3)
+#define RX_PKT_RCVD		BIT(4)
+#define RX_BUS_ERROR		BIT(7)
+#define RXSTATUS_RXPKTRCVD	BIT(0)
+
+struct xge_raw_desc {
+	__le64 m0;
+	__le64 m1;
+	__le64 m2;
+	__le64 m3;
+	__le64 m4;
+	__le64 m5;
+	__le64 m6;
+	__le64 m7;
+};
+
+struct pkt_info {
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	void *pkt_buf;
+};
+
+/* software context of a descriptor ring */
+struct xge_desc_ring {
+	struct net_device *ndev;
+	dma_addr_t dma_addr;
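+	/* u8 indices wrap at 256, matching XGENE_ENET_NUM_DESC */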
+	u8 head;
+	u8 tail;
+	union {
+		void *desc_addr;
+		struct xge_raw_desc *raw_desc;
+	};
+	struct pkt_info *pkt_info;
+};
+
+static inline u64 xge_set_desc_bits(int pos, int len, u64 val)
+{
+	return (val & ((1ULL << len) - 1)) << pos;
+}
+
+static inline u64 xge_get_desc_bits(int pos, int len, u64 src)
+{
+	return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define SET_BITS(field, val) \
+		xge_set_desc_bits(field ## _POS, field ## _LEN, val)
+
+#define GET_BITS(field, src) \
+		xge_get_desc_bits(field ## _POS, field ## _LEN, src)
+
+void xge_setup_desc(struct xge_desc_ring *ring);
+void xge_update_tx_desc_addr(struct xge_pdata *pdata);
+void xge_update_rx_desc_addr(struct xge_pdata *pdata);
+void xge_intr_enable(struct xge_pdata *pdata);
+void xge_intr_disable(struct xge_pdata *pdata);
+
+#endif  /* __XGENE_ENET_V2_RING_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
new file mode 100644
index 0000000..e4e33c9
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -0,0 +1,12 @@
+config NET_XGENE
+	tristate "APM X-Gene SoC Ethernet Driver"
+	depends on ARCH_XGENE || COMPILE_TEST
+	select PHYLIB
+	select MDIO_XGENE
+	select GPIOLIB
+	help
+	  This is the Ethernet driver for the on-chip Ethernet interface on the
+	  APM X-Gene SoC.
+
+	  To compile this driver as a module, choose M here. This module will
+	  be called xgene_enet.
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
new file mode 100644
index 0000000..f46321f
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for APM X-Gene Ethernet Driver.
+#
+
+xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
+		   xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o \
+		   xgene_enet_cle.o
+obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
new file mode 100644
index 0000000..e1a51d8
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -0,0 +1,835 @@
+/* Applied Micro X-Gene SoC Ethernet Classifier structures
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Authors: Khuong Dinh <kdinh@apm.com>
+ *          Tanmay Inamdar <tinamdar@apm.com>
+ *          Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+
+/* interfaces to convert structures to HW recognized bit formats */
+static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
+				  enum xgene_cle_prot_type type, u32 len,
+				  u32 *reg)
+{
+	*reg =  SET_VAL(SB_IPFRAG, frag) |
+		SET_VAL(SB_IPPROT, type) |
+		SET_VAL(SB_IPVER, ver) |
+		SET_VAL(SB_HDRLEN, len);
+}
+
+static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
+				u32 dstqid, u32 fpsel,
+				u32 nfpsel, u32 *idt_reg)
+{
+	if (pdata->enet_id == XGENE_ENET1) {
+		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+			   SET_VAL(IDT_FPSEL1, fpsel)  |
+			   SET_VAL(IDT_NFPSEL1, nfpsel);
+	} else {
+		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+			   SET_VAL(IDT_FPSEL, fpsel)   |
+			   SET_VAL(IDT_NFPSEL, nfpsel);
+	}
+}
+
+static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
+				  struct xgene_cle_dbptr *dbptr, u32 *buf)
+{
+	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
+	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
+		 SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
+		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
+
+	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
+		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
+}
+
+static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
+{
+	u32 i, j = 0;
+	u32 data;
+
+	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
+	for (i = 0; i < kn->num_keys; i++) {
+		struct xgene_cle_ptree_key *key = &kn->key[i];
+
+		if (!(i % 2)) {
+			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
+				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
+		} else {
+			data = SET_VAL(CLE_KN_PRIO, key->priority) |
+			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
+			buf[j++] |= (data << 16);
+		}
+	}
+}
+
+static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
+			       u32 *buf, u32 jb)
+{
+	const struct xgene_cle_ptree_branch *br;
+	u32 i, j = 0;
+	u32 npp;
+
+	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
+		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
+		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
+		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
+		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
+		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
+		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);
+
+	for (i = 0; i < dn->num_branches; i++) {
+		br = &dn->branch[i];
+		npp = br->next_packet_pointer;
+
+		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
+			npp += jb;
+
+		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
+			   SET_VAL(CLE_BR_NPPTR, npp) |
+			   SET_VAL(CLE_BR_JB, br->jump_bw) |
+			   SET_VAL(CLE_BR_JR, br->jump_rel) |
+			   SET_VAL(CLE_BR_OP, br->operation) |
+			   SET_VAL(CLE_BR_NNODE, br->next_node) |
+			   SET_VAL(CLE_BR_NBR, br->next_branch);
+
+		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
+			   SET_VAL(CLE_BR_MASK, br->mask);
+	}
+}
+
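+/* Wait for an indirect command to complete: up to ten 1-2 ms sleeps, i.e.
+ * roughly the CLE_CMD_TO (10 ms) budget, before giving up with -EBUSY.
+ */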
+static int xgene_cle_poll_cmd_done(void __iomem *base,
+				   enum xgene_cle_cmd_type cmd)
+{
+	u32 status, loop = 10;
+	int ret = -EBUSY;
+
+	while (loop--) {
+		status = ioread32(base + INDCMD_STATUS);
+		if (status & cmd) {
+			ret = 0;
+			break;
+		}
+		usleep_range(1000, 2000);
+	}
+
+	return ret;
+}
+
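+/* Indirect DRAM write: program INDADDR with the region- and port-qualified
+ * index, stage the payload words in DATA_RAM, then issue INDCMD and poll.
+ * Regions below PTREE_RAM are per-parser, so the write repeats once per
+ * active parser.
+ */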
+static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
+			     u32 index, enum xgene_cle_dram_type type,
+			     enum xgene_cle_cmd_type cmd)
+{
+	enum xgene_cle_parser parser = cle->active_parser;
+	void __iomem *base = cle->base;
+	u32 i, j, ind_addr;
+	u8 port, nparsers;
+	int ret = 0;
+
+	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
+	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;
+
+	for (i = 0; i < nparsers; i++) {
+		port = i;
+		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
+			port = parser;
+
+		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
+		iowrite32(ind_addr, base + INDADDR);
+		for (j = 0; j < nregs; j++)
+			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
+		iowrite32(cmd, base + INDCMD);
+
+		ret = xgene_cle_poll_cmd_done(base, cmd);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
+				   struct xgene_enet_cle *cle)
+{
+	struct xgene_cle_ptree *ptree = &cle->ptree;
+	void __iomem *addr, *base = cle->base;
+	u32 offset = CLE_PORT_OFFSET;
+	u32 i;
+
+	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
+	ptree->start_pkt += cle->jump_bytes;
+	for (i = 0; i < cle->parsers; i++) {
+		if (cle->active_parser != PARSER_ALL)
+			addr = base + cle->active_parser * offset;
+		else
+			addr = base + (i * offset);
+
+		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
+		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
+	}
+}
+
+static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
+				 struct xgene_enet_cle *cle)
+{
+	struct xgene_cle_ptree *ptree = &cle->ptree;
+	u32 buf[CLE_DRAM_REGS];
+	u32 i;
+	int ret;
+
+	memset(buf, 0, sizeof(buf));
+	for (i = 0; i < ptree->num_dbptr; i++) {
+		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
+		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
+					DB_RAM,	CLE_CMD_WR);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
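+/* Static classification tree: PKT_TYPE_NODE selects IPv4 frames,
+ * PKT_PROT_NODE dispatches on the L4 protocol byte (0x06 TCP, 0x11 UDP,
+ * anything else to the OTHERS node), and the RSS_* nodes step through the
+ * half-words of the flow tuple for hashing. A mask of 0xffff makes every
+ * bit don't-care, i.e. an unconditional catch-all branch.
+ */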
+static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
+	{
+		/* PKT_TYPE_NODE */
+		.node_type = EWDN,
+		.last_node = 0,
+		.hdr_len_store = 1,
+		.hdr_extn = NO_BYTE,
+		.byte_store = NO_BYTE,
+		.search_byte_store = NO_BYTE,
+		.result_pointer = DB_RES_DROP,
+		.num_branches = 2,
+		.branch = {
+			{
+				/* IPV4 */
+				.valid = 1,
+				.next_packet_pointer = 22,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = PKT_PROT_NODE,
+				.next_branch = 0,
+				.data = 0x8,
+				.mask = 0x0
+			},
+			{
+				.valid = 0,
+				.next_packet_pointer = 262,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = LAST_NODE,
+				.next_branch = 0,
+				.data = 0x0,
+				.mask = 0xffff
+			}
+		},
+	},
+	{
+		/* PKT_PROT_NODE */
+		.node_type = EWDN,
+		.last_node = 0,
+		.hdr_len_store = 1,
+		.hdr_extn = NO_BYTE,
+		.byte_store = NO_BYTE,
+		.search_byte_store = NO_BYTE,
+		.result_pointer = DB_RES_DROP,
+		.num_branches = 3,
+		.branch = {
+			{
+				/* TCP */
+				.valid = 1,
+				.next_packet_pointer = 26,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_TCP_NODE,
+				.next_branch = 0,
+				.data = 0x0600,
+				.mask = 0x00ff
+			},
+			{
+				/* UDP */
+				.valid = 1,
+				.next_packet_pointer = 26,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_UDP_NODE,
+				.next_branch = 0,
+				.data = 0x1100,
+				.mask = 0x00ff
+			},
+			{
+				.valid = 0,
+				.next_packet_pointer = 26,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_OTHERS_NODE,
+				.next_branch = 0,
+				.data = 0x0,
+				.mask = 0xffff
+			}
+		}
+	},
+	{
+		/* RSS_IPV4_TCP_NODE */
+		.node_type = EWDN,
+		.last_node = 0,
+		.hdr_len_store = 1,
+		.hdr_extn = NO_BYTE,
+		.byte_store = NO_BYTE,
+		.search_byte_store = BOTH_BYTES,
+		.result_pointer = DB_RES_DROP,
+		.num_branches = 6,
+		.branch = {
+			{
+				/* SRC IPV4 B01 */
+				.valid = 0,
+				.next_packet_pointer = 28,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_TCP_NODE,
+				.next_branch = 1,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* SRC IPV4 B23 */
+				.valid = 0,
+				.next_packet_pointer = 30,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_TCP_NODE,
+				.next_branch = 2,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* DST IPV4 B01 */
+				.valid = 0,
+				.next_packet_pointer = 32,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_TCP_NODE,
+				.next_branch = 3,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* DST IPV4 B23 */
+				.valid = 0,
+				.next_packet_pointer = 34,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_TCP_NODE,
+				.next_branch = 4,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* TCP SRC Port */
+				.valid = 0,
+				.next_packet_pointer = 36,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_TCP_NODE,
+				.next_branch = 5,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* TCP DST Port */
+				.valid = 0,
+				.next_packet_pointer = 256,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = LAST_NODE,
+				.next_branch = 0,
+				.data = 0x0,
+				.mask = 0xffff
+			}
+		}
+	},
+	{
+		/* RSS_IPV4_UDP_NODE */
+		.node_type = EWDN,
+		.last_node = 0,
+		.hdr_len_store = 1,
+		.hdr_extn = NO_BYTE,
+		.byte_store = NO_BYTE,
+		.search_byte_store = BOTH_BYTES,
+		.result_pointer = DB_RES_DROP,
+		.num_branches = 6,
+		.branch = {
+			{
+				/* SRC IPV4 B01 */
+				.valid = 0,
+				.next_packet_pointer = 28,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_UDP_NODE,
+				.next_branch = 1,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* SRC IPV4 B23 */
+				.valid = 0,
+				.next_packet_pointer = 30,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_UDP_NODE,
+				.next_branch = 2,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* DST IPV4 B01 */
+				.valid = 0,
+				.next_packet_pointer = 32,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_UDP_NODE,
+				.next_branch = 3,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* DST IPV4 B23 */
+				.valid = 0,
+				.next_packet_pointer = 34,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_UDP_NODE,
+				.next_branch = 4,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* UDP SRC Port */
+				.valid = 0,
+				.next_packet_pointer = 36,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_UDP_NODE,
+				.next_branch = 5,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* UDP DST Port */
+				.valid = 0,
+				.next_packet_pointer = 258,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = LAST_NODE,
+				.next_branch = 0,
+				.data = 0x0,
+				.mask = 0xffff
+			}
+		}
+	},
+	{
+		/* RSS_IPV4_OTHERS_NODE */
+		.node_type = EWDN,
+		.last_node = 0,
+		.hdr_len_store = 1,
+		.hdr_extn = NO_BYTE,
+		.byte_store = NO_BYTE,
+		.search_byte_store = BOTH_BYTES,
+		.result_pointer = DB_RES_DROP,
+		.num_branches = 6,
+		.branch = {
+			{
+				/* SRC IPV4 B01 */
+				.valid = 0,
+				.next_packet_pointer = 28,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_OTHERS_NODE,
+				.next_branch = 1,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* SRC IPV4 B23 */
+				.valid = 0,
+				.next_packet_pointer = 30,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_OTHERS_NODE,
+				.next_branch = 2,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* DST IPV4 B01 */
+				.valid = 0,
+				.next_packet_pointer = 32,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_OTHERS_NODE,
+				.next_branch = 3,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* DST IPV4 B23 */
+				.valid = 0,
+				.next_packet_pointer = 34,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_OTHERS_NODE,
+				.next_branch = 4,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* L4 SRC Port */
+				.valid = 0,
+				.next_packet_pointer = 36,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = RSS_IPV4_OTHERS_NODE,
+				.next_branch = 5,
+				.data = 0x0,
+				.mask = 0xffff
+			},
+			{
+				/* L4 DST Port */
+				.valid = 0,
+				.next_packet_pointer = 260,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = LAST_NODE,
+				.next_branch = 0,
+				.data = 0x0,
+				.mask = 0xffff
+			}
+		}
+	},
+	{
+		/* LAST NODE */
+		.node_type = EWDN,
+		.last_node = 1,
+		.hdr_len_store = 1,
+		.hdr_extn = NO_BYTE,
+		.byte_store = NO_BYTE,
+		.search_byte_store = NO_BYTE,
+		.result_pointer = DB_RES_DROP,
+		.num_branches = 1,
+		.branch = {
+			{
+				.valid = 0,
+				.next_packet_pointer = 0,
+				.jump_bw = JMP_FW,
+				.jump_rel = JMP_ABS,
+				.operation = EQT,
+				.next_node = MAX_NODES,
+				.next_branch = 0,
+				.data = 0,
+				.mask = 0xffff
+			}
+		}
+	}
+};
+
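+/* Load the tree: decision nodes are written first, and key nodes continue
+ * at the next PTREE_RAM index so both share one contiguous node numbering.
+ */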
+static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
+				struct xgene_enet_cle *cle)
+{
+	struct xgene_cle_ptree *ptree = &cle->ptree;
+	const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
+	int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
+	struct xgene_cle_ptree_kn *kn = ptree->kn;
+	u32 buf[CLE_DRAM_REGS];
+	int i, j, ret;
+
+	memset(buf, 0, sizeof(buf));
+	for (i = 0; i < num_dn; i++) {
+		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
+		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
+					PTREE_RAM, CLE_CMD_WR);
+		if (ret)
+			return ret;
+	}
+
+	/* continue node index for key node */
+	memset(buf, 0, sizeof(buf));
+	for (j = i; j < (ptree->num_kn + num_dn); j++) {
+		xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
+		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
+					PTREE_RAM, CLE_CMD_WR);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
+				 struct xgene_enet_cle *cle)
+{
+	int ret;
+
+	ret = xgene_cle_setup_node(pdata, cle);
+	if (ret)
+		return ret;
+
+	ret = xgene_cle_setup_dbptr(pdata, cle);
+	if (ret)
+		return ret;
+
+	xgene_cle_enable_ptree(pdata, cle);
+
+	return 0;
+}
+
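+/* Program the default (miss) classification result: the serialized dbptr is
+ * copied into each parser's DFCLSRESDB registers, and DFCLSRESDBPTR0 records
+ * its priority and database index.
+ */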
+static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
+				      struct xgene_enet_cle *enet_cle,
+				      struct xgene_cle_dbptr *dbptr,
+				      u32 index, u8 priority)
+{
+	void __iomem *base = enet_cle->base;
+	void __iomem *base_addr;
+	u32 buf[CLE_DRAM_REGS];
+	u32 def_cls, offset;
+	u32 i, j;
+
+	memset(buf, 0, sizeof(buf));
+	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);
+
+	for (i = 0; i < enet_cle->parsers; i++) {
+		if (enet_cle->active_parser != PARSER_ALL) {
+			offset = enet_cle->active_parser *
+				CLE_PORT_OFFSET;
+		} else {
+			offset = i * CLE_PORT_OFFSET;
+		}
+
+		base_addr = base + DFCLSRESDB00 + offset;
+		for (j = 0; j < 6; j++)
+			iowrite32(buf[j], base_addr + (j * 4));
+
+		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
+		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
+	}
+}
+
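+/* Two 16-bit sideband entries pack into each PKT_RAM word. The header-length
+ * field carries the MAC header length in its upper bits and the IPv4 IHL (in
+ * 32-bit words) in the low 5 bits, e.g. (14 << 5) | 5 = 453 for an untagged
+ * frame with a 20-byte IP header.
+ */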
+static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
+{
+	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
+	u32 mac_hdr_len = ETH_HLEN;
+	u32 sband, reg = 0;
+	u32 ipv4_ihl = 5;
+	u32 hdr_len;
+	int ret;
+
+	/* Sideband: IPv4/TCP packets */
+	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
+	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
+	sband = reg;
+
+	/* Sideband: IPv4/UDP packets */
+	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
+	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
+	sband |= (reg << 16);
+
+	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
+	if (ret)
+		return ret;
+
+	/* Sideband: IPv4/RAW packets */
+	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
+	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
+			      hdr_len, &reg);
+	sband = reg;
+
+	/* Sideband: Ethernet II/RAW packets */
+	hdr_len = (mac_hdr_len << 5);
+	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
+			      hdr_len, &reg);
+	sband |= (reg << 16);
+
+	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
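+/* Seed the RSS hash with a random 128-bit secret key. */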
+static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
+{
+	u32 secret_key_ipv4[4];  /* 16 bytes */
+	int ret = 0;
+
+	get_random_bytes(secret_key_ipv4, 16);
+	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
+				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
+	return ret;
+}
+
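+/* Populate the 128-entry RSS indirection table, spreading hash buckets
+ * round-robin across the receive rings; each entry carries the ring's
+ * destination queue id and free-pool selector(s).
+ */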
+static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
+{
+	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
+	int i, ret = 0;
+	u16 pool_id;
+
+	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
+		idx = i % pdata->rxq_cnt;
+		pool_id = pdata->rx_ring[idx]->buf_pool->id;
+		fpsel = xgene_enet_get_fpsel(pool_id);
+		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
+		nfpsel = 0;
+		if (pdata->rx_ring[idx]->page_pool) {
+			pool_id = pdata->rx_ring[idx]->page_pool->id;
+			nfpsel = xgene_enet_get_fpsel(pool_id);
+		}
+
+		idt_reg = 0;
+		xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
+		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
+					RSS_IDT, CLE_CMD_WR);
+		if (ret)
+			return ret;
+	}
+
+	ret = xgene_cle_set_rss_skeys(&pdata->cle);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
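+/* Per-parser RSS enable: bit 0 of RSS_CTRL0 turns hashing on, and bit 1
+ * selects the wider 12-byte hash input (RSS_IPV4_12B rather than
+ * RSS_IPV4_8B), which presumably covers the IPv4 addresses plus both
+ * L4 ports.
+ */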
+static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
+{
+	struct xgene_enet_cle *cle = &pdata->cle;
+	void __iomem *base = cle->base;
+	u32 offset, val = 0;
+	int i, ret = 0;
+
+	offset = CLE_PORT_OFFSET;
+	for (i = 0; i < cle->parsers; i++) {
+		if (cle->active_parser != PARSER_ALL)
+			offset = cle->active_parser * CLE_PORT_OFFSET;
+		else
+			offset = i * CLE_PORT_OFFSET;
+
+		/* enable RSS */
+		val = (RSS_IPV4_12B << 1) | 0x1;
+		writel(val, base + RSS_CTRL0 + offset);
+	}
+
+	/* setup sideband data */
+	ret = xgene_cle_set_rss_sband(cle);
+	if (ret)
+		return ret;
+
+	/* setup indirection table */
+	ret = xgene_cle_set_rss_idt(pdata);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
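+/* Top-level classifier bring-up (10G/XGMII only): enable RSS, derive the
+ * accept/default result pointers from ring 0's destination queue and buffer
+ * pools, attach a single key node resolving to DB_RES_ACCEPT, and load the
+ * static tree above.
+ */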
+static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
+{
+	struct xgene_enet_cle *enet_cle = &pdata->cle;
+	u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
+	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
+	struct xgene_cle_ptree *ptree;
+	struct xgene_cle_ptree_kn kn;
+	int ret;
+
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+		return -EINVAL;
+
+	ptree = &enet_cle->ptree;
+	ptree->start_pkt = 12; /* Ethertype */
+
+	ret = xgene_cle_setup_rss(pdata);
+	if (ret) {
+		netdev_err(pdata->ndev, "RSS initialization failed\n");
+		return ret;
+	}
+
+	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+	pool_id = pdata->rx_ring[0]->buf_pool->id;
+	def_fpsel = xgene_enet_get_fpsel(pool_id);
+	def_nxtfpsel = 0;
+	if (pdata->rx_ring[0]->page_pool) {
+		pool_id = pdata->rx_ring[0]->page_pool->id;
+		def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
+	}
+
+	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
+	dbptr[DB_RES_ACCEPT].fpsel =  def_fpsel;
+	dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
+	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
+	dbptr[DB_RES_ACCEPT].cle_priority = 1;
+
+	dbptr[DB_RES_DEF].fpsel = def_fpsel;
+	dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
+	dbptr[DB_RES_DEF].dstqid = def_qid;
+	dbptr[DB_RES_DEF].cle_priority = 7;
+	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
+				  DB_RES_ACCEPT, 7);
+
+	dbptr[DB_RES_DROP].drop = 1;
+
+	memset(&kn, 0, sizeof(kn));
+	kn.node_type = KN;
+	kn.num_keys = 1;
+	kn.key[0].priority = 0;
+	kn.key[0].result_pointer = DB_RES_ACCEPT;
+
+	ptree->kn = &kn;
+	ptree->dbptr = dbptr;
+	ptree->num_kn = 1;
+	ptree->num_dbptr = DB_MAX_PTRS;
+
+	return xgene_cle_setup_ptree(pdata, enet_cle);
+}
+
+const struct xgene_cle_ops xgene_cle3in_ops = {
+	.cle_init = xgene_enet_cle_init,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
new file mode 100644
index 0000000..18fe8d5
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -0,0 +1,302 @@
+/* Applied Micro X-Gene SoC Ethernet Classifier structures
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Authors: Khuong Dinh <kdinh@apm.com>
+ *          Tanmay Inamdar <tinamdar@apm.com>
+ *          Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_CLE_H__
+#define __XGENE_ENET_CLE_H__
+
+#include <linux/io.h>
+#include <linux/random.h>
+
+/* Register offsets */
+#define INDADDR			0x04
+#define INDCMD			0x08
+#define INDCMD_STATUS		0x0c
+#define DATA_RAM0		0x10
+#define SNPTR0			0x0100
+#define SPPTR0			0x0104
+#define DFCLSRESDBPTR0		0x0108
+#define DFCLSRESDB00		0x010c
+#define RSS_CTRL0		0x0000013c
+
+#define CLE_CMD_TO		10	/* ms */
+#define CLE_PKTRAM_SIZE		256	/* bytes */
+#define CLE_PORT_OFFSET		0x200
+#define CLE_DRAM_REGS		17
+
+#define CLE_DN_TYPE_LEN		2
+#define CLE_DN_TYPE_POS		0
+#define CLE_DN_LASTN_LEN	1
+#define CLE_DN_LASTN_POS	2
+#define CLE_DN_HLS_LEN		1
+#define CLE_DN_HLS_POS		3
+#define CLE_DN_EXT_LEN		2
+#define CLE_DN_EXT_POS		4
+#define CLE_DN_BSTOR_LEN	2
+#define CLE_DN_BSTOR_POS	6
+#define CLE_DN_SBSTOR_LEN	2
+#define CLE_DN_SBSTOR_POS	8
+#define CLE_DN_RPTR_LEN		12
+#define CLE_DN_RPTR_POS		12
+
+#define CLE_BR_VALID_LEN	1
+#define CLE_BR_VALID_POS	0
+#define CLE_BR_NPPTR_LEN	9
+#define CLE_BR_NPPTR_POS	1
+#define CLE_BR_JB_LEN		1
+#define CLE_BR_JB_POS		10
+#define CLE_BR_JR_LEN		1
+#define CLE_BR_JR_POS		11
+#define CLE_BR_OP_LEN		3
+#define CLE_BR_OP_POS		12
+#define CLE_BR_NNODE_LEN	9
+#define CLE_BR_NNODE_POS	15
+#define CLE_BR_NBR_LEN		5
+#define CLE_BR_NBR_POS		24
+
+#define CLE_BR_DATA_LEN		16
+#define CLE_BR_DATA_POS		0
+#define CLE_BR_MASK_LEN		16
+#define CLE_BR_MASK_POS		16
+
+#define CLE_KN_PRIO_POS		0
+#define CLE_KN_PRIO_LEN		3
+#define CLE_KN_RPTR_POS		3
+#define CLE_KN_RPTR_LEN		10
+#define CLE_TYPE_POS		0
+#define CLE_TYPE_LEN		2
+
+#define CLE_DROP_POS		28
+#define CLE_DROP_LEN		1
+#define CLE_DSTQIDL_POS		25
+#define CLE_DSTQIDL_LEN		7
+#define CLE_DSTQIDH_POS		0
+#define CLE_DSTQIDH_LEN		5
+#define CLE_FPSEL_POS		21
+#define CLE_FPSEL_LEN		4
+#define CLE_NFPSEL_POS		17
+#define CLE_NFPSEL_LEN		4
+#define CLE_PRIORITY_POS	5
+#define CLE_PRIORITY_LEN	3
+
+#define JMP_ABS			0
+#define JMP_REL			1
+#define JMP_FW			0
+#define JMP_BW			1
+
+enum xgene_cle_ptree_nodes {
+	PKT_TYPE_NODE,
+	PKT_PROT_NODE,
+	RSS_IPV4_TCP_NODE,
+	RSS_IPV4_UDP_NODE,
+	RSS_IPV4_OTHERS_NODE,
+	LAST_NODE,
+	MAX_NODES
+};
+
+enum xgene_cle_byte_store {
+	NO_BYTE,
+	FIRST_BYTE,
+	SECOND_BYTE,
+	BOTH_BYTES
+};
+
+/* Preclassification operation types */
+enum xgene_cle_node_type {
+	INV,
+	KN,
+	EWDN,
+	RES_NODE
+};
+
+/* Preclassification operation types */
+enum xgene_cle_op_type {
+	EQT,
+	NEQT,
+	LTEQT,
+	GTEQT,
+	AND,
+	NAND
+};
+
+enum xgene_cle_parser {
+	PARSER0,
+	PARSER1,
+	PARSER2,
+	PARSER_ALL
+};
+
+#define XGENE_CLE_DRAM(type)	(((type) & 0xf) << 28)
+enum xgene_cle_dram_type {
+	PKT_RAM,
+	RSS_IDT,
+	RSS_IPV4_HASH_SKEY,
+	PTREE_RAM = 0xc,
+	AVL_RAM,
+	DB_RAM
+};
+
+enum xgene_cle_cmd_type {
+	CLE_CMD_WR = 1,
+	CLE_CMD_RD = 2,
+	CLE_CMD_AVL_ADD = 8,
+	CLE_CMD_AVL_DEL = 16,
+	CLE_CMD_AVL_SRCH = 32
+};
+
+enum xgene_cle_ipv4_rss_hashtype {
+	RSS_IPV4_8B,
+	RSS_IPV4_12B,
+};
+
+enum xgene_cle_prot_type {
+	XGENE_CLE_TCP,
+	XGENE_CLE_UDP,
+	XGENE_CLE_ESP,
+	XGENE_CLE_OTHER
+};
+
+enum xgene_cle_prot_version {
+	XGENE_CLE_IPV4,
+};
+
+enum xgene_cle_ptree_dbptrs {
+	DB_RES_DROP,
+	DB_RES_DEF,
+	DB_RES_ACCEPT,
+	DB_MAX_PTRS
+};
+
+/* RSS sideband signal info */
+#define SB_IPFRAG_POS	0
+#define SB_IPFRAG_LEN	1
+#define SB_IPPROT_POS	1
+#define SB_IPPROT_LEN	2
+#define SB_IPVER_POS	3
+#define SB_IPVER_LEN	1
+#define SB_HDRLEN_POS	4
+#define SB_HDRLEN_LEN	12
+
+/* RSS indirection table */
+#define XGENE_CLE_IDT_ENTRIES	128
+#define IDT_DSTQID_POS		0
+#define IDT_DSTQID_LEN		12
+#define IDT_FPSEL_POS		12
+#define IDT_FPSEL_LEN		5
+#define IDT_NFPSEL_POS		17
+#define IDT_NFPSEL_LEN		5
+#define IDT_FPSEL1_POS		12
+#define IDT_FPSEL1_LEN		4
+#define IDT_NFPSEL1_POS		16
+#define IDT_NFPSEL1_LEN		4
+
+struct xgene_cle_ptree_branch {
+	bool valid;
+	u16 next_packet_pointer;
+	bool jump_bw;
+	bool jump_rel;
+	u8 operation;
+	u16 next_node;
+	u8 next_branch;
+	u16 data;
+	u16 mask;
+};
+
+struct xgene_cle_ptree_ewdn {
+	u8 node_type;
+	bool last_node;
+	bool hdr_len_store;
+	u8 hdr_extn;
+	u8 byte_store;
+	u8 search_byte_store;
+	u16 result_pointer;
+	u8 num_branches;
+	struct xgene_cle_ptree_branch branch[6];
+};
+
+struct xgene_cle_ptree_key {
+	u8 priority;
+	u16 result_pointer;
+};
+
+struct xgene_cle_ptree_kn {
+	u8 node_type;
+	u8 num_keys;
+	struct xgene_cle_ptree_key key[32];
+};
+
+struct xgene_cle_dbptr {
+	u8 split_boundary;
+	u8 mirror_nxtfpsel;
+	u8 mirror_fpsel;
+	u16 mirror_dstqid;
+	u8 drop;
+	u8 mirror;
+	u8 hdr_data_split;
+	u64 hopinfomsbs;
+	u8 DR;
+	u8 HR;
+	u64 hopinfomlsbs;
+	u16 h0enq_num;
+	u8 h0fpsel;
+	u8 nxtfpsel;
+	u8 fpsel;
+	u16 dstqid;
+	u8 cle_priority;
+	u8 cle_flowgroup;
+	u8 cle_perflow;
+	u8 cle_insert_timestamp;
+	u8 stash;
+	u8 in;
+	u8 perprioen;
+	u8 perflowgroupen;
+	u8 perflowen;
+	u8 selhash;
+	u8 selhdrext;
+	u8 mirror_nxtfpsel_msb;
+	u8 mirror_fpsel_msb;
+	u8 hfpsel_msb;
+	u8 nxtfpsel_msb;
+	u8 fpsel_msb;
+};
+
+struct xgene_cle_ptree {
+	struct xgene_cle_ptree_kn *kn;
+	struct xgene_cle_dbptr *dbptr;
+	u32 num_kn;
+	u32 num_dbptr;
+	u32 start_node;
+	u32 start_pkt;
+	u32 start_dbptr;
+};
+
+struct xgene_enet_cle {
+	void __iomem *base;
+	struct xgene_cle_ptree ptree;
+	enum xgene_cle_parser active_parser;
+	u32 parsers;
+	u32 max_nodes;
+	u32 max_dbptrs;
+	u32 jump_bytes;
+};
+
+extern const struct xgene_cle_ops xgene_cle3in_ops;
+
+#endif /* __XGENE_ENET_CLE_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
new file mode 100644
index 0000000..4f50f11
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -0,0 +1,377 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/ethtool.h>
+#include "xgene_enet_main.h"
+
+struct xgene_gstrings_stats {
+	char name[ETH_GSTRING_LEN];
+	int offset;
+	u32 addr;
+	u32 mask;
+};
+
+#define XGENE_STAT(m) { #m, offsetof(struct rtnl_link_stats64, m) }
+#define XGENE_EXTD_STAT(s, a, m)		\
+		{			\
+		.name = #s,		\
+		.addr = a ## _ADDR,	\
+		.mask = m		\
+		}
+
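+/* The third XGENE_EXTD_STAT argument is the hardware counter width in bits;
+ * reads are masked with GENMASK(width - 1, 0). A width of 0 (the DUMP
+ * entries) marks counters maintained in software from drop counts and
+ * errata fixups instead.
+ */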
+static const struct xgene_gstrings_stats gstrings_stats[] = {
+	XGENE_STAT(rx_packets),
+	XGENE_STAT(tx_packets),
+	XGENE_STAT(rx_bytes),
+	XGENE_STAT(tx_bytes),
+	XGENE_STAT(rx_errors),
+	XGENE_STAT(tx_errors),
+	XGENE_STAT(rx_length_errors),
+	XGENE_STAT(rx_crc_errors),
+	XGENE_STAT(rx_frame_errors),
+	XGENE_STAT(rx_fifo_errors)
+};
+
+static const struct xgene_gstrings_stats gstrings_extd_stats[] = {
+	XGENE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64, 31),
+	XGENE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127, 31),
+	XGENE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255, 31),
+	XGENE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511, 31),
+	XGENE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K, 31),
+	XGENE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX, 31),
+	XGENE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV, 31),
+	XGENE_EXTD_STAT(rx_fcs_error_cntr, RFCS, 16),
+	XGENE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA, 31),
+	XGENE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA, 31),
+	XGENE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF, 16),
+	XGENE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF, 16),
+	XGENE_EXTD_STAT(rx_unk_opcode_cntr, RXUO, 16),
+	XGENE_EXTD_STAT(rx_align_err_cntr, RALN, 16),
+	XGENE_EXTD_STAT(rx_frame_len_err_cntr, RFLR, 16),
+	XGENE_EXTD_STAT(rx_frame_len_err_recov_cntr, DUMP, 0),
+	XGENE_EXTD_STAT(rx_code_err_cntr, RCDE, 16),
+	XGENE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE, 16),
+	XGENE_EXTD_STAT(rx_undersize_pkt_cntr, RUND, 16),
+	XGENE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR, 16),
+	XGENE_EXTD_STAT(rx_fragments_cntr, RFRG, 16),
+	XGENE_EXTD_STAT(rx_jabber_cntr, RJBR, 16),
+	XGENE_EXTD_STAT(rx_jabber_recov_cntr, DUMP, 0),
+	XGENE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP, 16),
+	XGENE_EXTD_STAT(rx_overrun_cntr, DUMP, 0),
+	XGENE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA, 31),
+	XGENE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA, 31),
+	XGENE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF, 16),
+	XGENE_EXTD_STAT(tx_defer_pkt_cntr, TDFR, 31),
+	XGENE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF, 31),
+	XGENE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL, 31),
+	XGENE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL, 31),
+	XGENE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL, 31),
+	XGENE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL, 31),
+	XGENE_EXTD_STAT(tx_total_col_cntr, TNCL, 31),
+	XGENE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH, 16),
+	XGENE_EXTD_STAT(tx_drop_frame_cntr, TDRP, 16),
+	XGENE_EXTD_STAT(tx_jabber_frame_cntr, TJBR, 12),
+	XGENE_EXTD_STAT(tx_fcs_error_cntr, TFCS, 12),
+	XGENE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF, 12),
+	XGENE_EXTD_STAT(tx_oversize_frame_cntr, TOVR, 12),
+	XGENE_EXTD_STAT(tx_undersize_frame_cntr, TUND, 12),
+	XGENE_EXTD_STAT(tx_fragments_cntr, TFRG, 12),
+	XGENE_EXTD_STAT(tx_underrun_cntr, DUMP, 0)
+};
+
+#define XGENE_STATS_LEN		ARRAY_SIZE(gstrings_stats)
+#define XGENE_EXTD_STATS_LEN	ARRAY_SIZE(gstrings_extd_stats)
+#define RFCS_IDX		7
+#define RALN_IDX		13
+#define RFLR_IDX		14
+#define FALSE_RFLR_IDX		15
+#define RUND_IDX		18
+#define FALSE_RJBR_IDX		22
+#define RX_OVERRUN_IDX		24
+#define TFCS_IDX		38
+#define TFRG_IDX		42
+#define TX_UNDERRUN_IDX		43
+
+static void xgene_get_drvinfo(struct net_device *ndev,
+			      struct ethtool_drvinfo *info)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct platform_device *pdev = pdata->pdev;
+
+	strlcpy(info->driver, "xgene_enet", sizeof(info->driver));
+	strlcpy(info->version, XGENE_DRV_VERSION, sizeof(info->version));
+	snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
+	snprintf(info->bus_info, sizeof(info->bus_info), "%s", pdev->name);
+}
+
+static int xgene_get_link_ksettings(struct net_device *ndev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	u32 supported;
+
+	if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+		if (!phydev)
+			return -ENODEV;
+
+		phy_ethtool_ksettings_get(phydev, cmd);
+
+		return 0;
+	} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+		if (pdata->mdio_driver) {
+			if (!phydev)
+				return -ENODEV;
+
+			phy_ethtool_ksettings_get(phydev, cmd);
+
+			return 0;
+		}
+
+		supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+			SUPPORTED_MII;
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.supported,
+			supported);
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.advertising,
+			supported);
+
+		cmd->base.speed = SPEED_1000;
+		cmd->base.duplex = DUPLEX_FULL;
+		cmd->base.port = PORT_MII;
+		cmd->base.autoneg = AUTONEG_ENABLE;
+	} else {
+		supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.supported,
+			supported);
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.advertising,
+			supported);
+
+		cmd->base.speed = SPEED_10000;
+		cmd->base.duplex = DUPLEX_FULL;
+		cmd->base.port = PORT_FIBRE;
+		cmd->base.autoneg = AUTONEG_DISABLE;
+	}
+
+	return 0;
+}
+
+static int xgene_set_link_ksettings(struct net_device *ndev,
+				    const struct ethtool_link_ksettings *cmd)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+
+	if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+		if (!phydev)
+			return -ENODEV;
+
+		return phy_ethtool_ksettings_set(phydev, cmd);
+	}
+
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+		if (pdata->mdio_driver) {
+			if (!phydev)
+				return -ENODEV;
+
+			return phy_ethtool_ksettings_set(phydev, cmd);
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	int i;
+	u8 *p = data;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < XGENE_STATS_LEN; i++) {
+		memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+		memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+}
+
+static int xgene_get_sset_count(struct net_device *ndev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EINVAL;
+
+	return XGENE_STATS_LEN + XGENE_EXTD_STATS_LEN;
+}
+
+static void xgene_get_extd_stats(struct xgene_enet_pdata *pdata)
+{
+	u32 rx_drop, tx_drop;
+	u32 mask, tmp;
+	int i;
+
+	for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+		tmp = xgene_enet_rd_stat(pdata, gstrings_extd_stats[i].addr);
+		if (gstrings_extd_stats[i].mask) {
+			mask = GENMASK(gstrings_extd_stats[i].mask - 1, 0);
+			pdata->extd_stats[i] += (tmp & mask);
+		}
+	}
+
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+		/* Errata 10GE_10 - SW should interpret RALN as 0 */
+		pdata->extd_stats[RALN_IDX] = 0;
+	} else {
+		/* Errata ENET_15 - Fixes RFCS, RFLR, TFCS counter */
+		pdata->extd_stats[RFCS_IDX] -= pdata->extd_stats[RALN_IDX];
+		pdata->extd_stats[RFLR_IDX] -= pdata->extd_stats[RUND_IDX];
+		pdata->extd_stats[TFCS_IDX] -= pdata->extd_stats[TFRG_IDX];
+	}
+
+	pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop);
+	pdata->extd_stats[RX_OVERRUN_IDX] += rx_drop;
+	pdata->extd_stats[TX_UNDERRUN_IDX] += tx_drop;
+
+	/* Errata 10GE_8 - Update frames recovered (Errata 10GE_8/ENET_11) */
+	pdata->extd_stats[FALSE_RFLR_IDX] = pdata->false_rflr;
+	/* Errata ENET_15 - Jabber frame recovery (Errata 10GE_10/ENET_15) */
+	pdata->extd_stats[FALSE_RJBR_IDX] = pdata->vlan_rjbr;
+}
+
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata)
+{
+	pdata->extd_stats = devm_kmalloc_array(&pdata->pdev->dev,
+			XGENE_EXTD_STATS_LEN, sizeof(u64), GFP_KERNEL);
+	if (!pdata->extd_stats)
+		return -ENOMEM;
+
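+	/* Prime the stats: this first read flushes whatever the hardware
+	 * accumulated before probe, so the zeroed soft counters below start
+	 * accounting from a clean slate.
+	 */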
+	xgene_get_extd_stats(pdata);
+	memset(pdata->extd_stats, 0, XGENE_EXTD_STATS_LEN * sizeof(u64));
+
+	return 0;
+}
+
+static void xgene_get_ethtool_stats(struct net_device *ndev,
+				    struct ethtool_stats *dummy,
+				    u64 *data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct rtnl_link_stats64 stats;
+	int i;
+
+	dev_get_stats(ndev, &stats);
+	for (i = 0; i < XGENE_STATS_LEN; i++)
+		data[i] = *(u64 *)((char *)&stats + gstrings_stats[i].offset);
+
+	xgene_get_extd_stats(pdata);
+	for (i = 0; i < XGENE_EXTD_STATS_LEN; i++)
+		data[i + XGENE_STATS_LEN] = pdata->extd_stats[i];
+}
+
+static void xgene_get_pauseparam(struct net_device *ndev,
+				 struct ethtool_pauseparam *pp)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+
+	pp->autoneg = pdata->pause_autoneg;
+	pp->tx_pause = pdata->tx_pause;
+	pp->rx_pause = pdata->rx_pause;
+}
+
+static int xgene_set_pauseparam(struct net_device *ndev,
+				struct ethtool_pauseparam *pp)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	u32 oldadv, newadv;
+
+	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
+	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+		if (!phydev)
+			return -EINVAL;
+
+		if (!(phydev->supported & SUPPORTED_Pause) ||
+		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+		     pp->rx_pause != pp->tx_pause))
+			return -EINVAL;
+
+		pdata->pause_autoneg = pp->autoneg;
+		pdata->tx_pause = pp->tx_pause;
+		pdata->rx_pause = pp->rx_pause;
+
+		oldadv = phydev->advertising;
+		newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
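+		/* Standard 802.3 pause advertisement encoding: rx-only
+		 * advertises Pause | Asym_Pause, rx+tx advertises Pause
+		 * alone, and tx-only advertises Asym_Pause alone, hence
+		 * the XOR below.
+		 */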
+		if (pp->rx_pause)
+			newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+		if (pp->tx_pause)
+			newadv ^= ADVERTISED_Asym_Pause;
+
+		if (oldadv ^ newadv) {
+			phydev->advertising = newadv;
+
+			if (phydev->autoneg)
+				return phy_start_aneg(phydev);
+
+			if (!pp->autoneg) {
+				pdata->mac_ops->flowctl_tx(pdata,
+							   pdata->tx_pause);
+				pdata->mac_ops->flowctl_rx(pdata,
+							   pdata->rx_pause);
+			}
+		}
+
+	} else {
+		if (pp->autoneg)
+			return -EINVAL;
+
+		pdata->tx_pause = pp->tx_pause;
+		pdata->rx_pause = pp->rx_pause;
+
+		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops xgene_ethtool_ops = {
+	.get_drvinfo = xgene_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_strings = xgene_get_strings,
+	.get_sset_count = xgene_get_sset_count,
+	.get_ethtool_stats = xgene_get_ethtool_stats,
+	.get_link_ksettings = xgene_get_link_ksettings,
+	.set_link_ksettings = xgene_set_link_ksettings,
+	.get_pauseparam = xgene_get_pauseparam,
+	.set_pauseparam = xgene_set_pauseparam
+};
+
+void xgene_enet_set_ethtool_ops(struct net_device *ndev)
+{
+	ndev->ethtool_ops = &xgene_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
new file mode 100644
index 0000000..078a04d
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -0,0 +1,1041 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+
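+/* Stage the ring configuration in the ring->state[] shadow words; the DMA
+ * base address, shifted down 8 bits, straddles state words 2 and 3, with the
+ * ring size, threshold and coherency bits packed around it.
+ */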
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	u64 addr = ring->dma;
+	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;
+
+	ring_cfg[4] |= (1 << SELTHRSH_POS) &
+			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
+	ring_cfg[3] |= ACCEPTLERR;
+	ring_cfg[2] |= QCOHERENT;
+
+	addr >>= 8;
+	ring_cfg[2] |= (addr << RINGADDRL_POS) &
+			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
+	addr >>= RINGADDRL_LEN;
+	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
+	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
+			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	bool is_bufpool;
+	u32 val;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+	ring_cfg[4] |= (val << RINGTYPE_POS) &
+			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);
+
+	if (is_bufpool) {
+		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
+				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
+	}
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+
+	ring_cfg[3] |= RECOMBBUF;
+	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
+			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
+	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+				 u32 offset, u32 data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
+				 u32 offset, u32 *data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	*data = ioread32(pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+	int i;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+				     ring->state[i]);
+	}
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	memset(ring->state, 0, sizeof(ring->state));
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	xgene_enet_ring_set_type(ring);
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
+	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
+		xgene_enet_ring_set_recombbuf(ring);
+
+	xgene_enet_ring_init(ring);
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id_val, ring_id_buf;
+	bool is_bufpool;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+	ring_id_val = ring->id & GENMASK(9, 0);
+	ring_id_val |= OVERWRITE;
+
+	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+	ring_id_buf |= PREFETCH_BUF_EN;
+	if (is_bufpool)
+		ring_id_buf |= IS_BUFFER_POOL;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id;
+
+	ring_id = ring->id | OVERWRITE;
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+				    struct xgene_enet_desc_ring *ring)
+{
+	u32 size = ring->size;
+	u32 i, data;
+	bool is_bufpool;
+
+	xgene_enet_clr_ring_state(ring);
+	xgene_enet_set_ring_state(ring);
+	xgene_enet_set_ring_id(ring);
+
+	ring->slots = xgene_enet_get_numslots(ring->id, size);
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+		return ring;
+
+	for (i = 0; i < ring->slots; i++)
+		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
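+	/* For rings delivered to the CPU, set this ring's bit in the
+	 * "not empty" interrupt mode register so that incoming messages
+	 * are signalled.
+	 */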
+	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
+	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
+	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);
+
+	return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+	u32 data;
+	bool is_bufpool;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+		goto out;
+
+	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
+	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
+	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);
+
+out:
+	xgene_enet_clr_desc_ring_id(ring);
+	xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+	iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+	u32 __iomem *cmd_base = ring->cmd_base;
+	u32 ring_state, num_msgs;
+
+	ring_state = ioread32(&cmd_base[1]);
+	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+	return num_msgs;
+}
+
+void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
+			    enum xgene_enet_err_code status)
+{
+	switch (status) {
+	case INGRESS_CRC:
+		ring->rx_crc_errors++;
+		break;
+	case INGRESS_CHECKSUM:
+	case INGRESS_CHECKSUM_COMPUTE:
+		ring->rx_errors++;
+		break;
+	case INGRESS_TRUNC_FRAME:
+		ring->rx_frame_errors++;
+		break;
+	case INGRESS_PKT_LEN:
+		ring->rx_length_errors++;
+		break;
+	case INGRESS_PKT_UNDER:
+		ring->rx_frame_errors++;
+		break;
+	case INGRESS_FIFO_OVERRUN:
+		ring->rx_fifo_errors++;
+		break;
+	default:
+		break;
+	}
+}
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_ring_if_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
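+/* Indirect MAC register access: address, write-data, command and
+ * command-done registers live behind mcx_mac_addr. A write programs all
+ * three and polls the done flag for up to ~10 us while holding mac_lock;
+ * reads below follow the same handshake.
+ */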
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
+{
+	void __iomem *addr, *wr, *cmd, *cmd_done;
+	struct net_device *ndev = pdata->ndev;
+	u8 wait = 10;
+	u32 done;
+
+	if (pdata->mdio_driver && ndev->phydev &&
+	    phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+		struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+		return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
+	}
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	spin_lock(&pdata->mac_lock);
+	iowrite32(wr_addr, addr);
+	iowrite32(wr_data, wr);
+	iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
+			   wr_addr, wr_data);
+
+	iowrite32(0, cmd);
+	spin_unlock(&pdata->mac_lock);
+}
+
+static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
+{
+	void __iomem *addr, *rd, *cmd, *cmd_done;
+	struct net_device *ndev = pdata->ndev;
+	u32 done, rd_data;
+	u8 wait = 10;
+
+	if (pdata->mdio_driver && ndev->phydev &&
+	    phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+		struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+		return xgene_mdio_rd_mac(bus->priv, rd_addr);
+	}
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	spin_lock(&pdata->mac_lock);
+	iowrite32(rd_addr, addr);
+	iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);
+
+	rd_data = ioread32(rd);
+	iowrite32(0, cmd);
+	spin_unlock(&pdata->mac_lock);
+
+	return rd_data;
+}
+
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
+{
+	void __iomem *addr, *rd, *cmd, *cmd_done;
+	u32 done, rd_data;
+	u8 wait = 10;
+
+	addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
+	rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
+	cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;
+
+	spin_lock(&pdata->stats_lock);
+	iowrite32(rd_addr, addr);
+	iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
+			   rd_addr);
+
+	rd_data = ioread32(rd);
+	iowrite32(0, cmd);
+	spin_unlock(&pdata->stats_lock);
+
+	return rd_data;
+}
+
+static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+{
+	u32 addr0, addr1;
+	u8 *dev_addr = pdata->ndev->dev_addr;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+	xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
+	xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	u32 data;
+	u8 wait = 10;
+
+	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+	do {
+		usleep_range(100, 110);
+		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
+	} while ((data != 0xffffffff) && wait--);
+
+	if (data != 0xffffffff) {
+		netdev_err(ndev, "Failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
+}
+
+static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	if (dev->of_node) {
+		struct clk *parent = clk_get_parent(pdata->clk);
+
+		switch (pdata->phy_speed) {
+		case SPEED_10:
+			clk_set_rate(parent, 2500000);
+			break;
+		case SPEED_100:
+			clk_set_rate(parent, 25000000);
+			break;
+		default:
+			clk_set_rate(parent, 125000000);
+			break;
+		}
+	}
+#ifdef CONFIG_ACPI
+	else {
+		switch (pdata->phy_speed) {
+		case SPEED_10:
+			acpi_evaluate_object(ACPI_HANDLE(dev),
+					     "S10", NULL, NULL);
+			break;
+		case SPEED_100:
+			acpi_evaluate_object(ACPI_HANDLE(dev),
+					     "S100", NULL, NULL);
+			break;
+		default:
+			acpi_evaluate_object(ACPI_HANDLE(dev),
+					     "S1G", NULL, NULL);
+			break;
+		}
+	}
+#endif
+}
+
+static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
+{
+	u32 icm0, icm2, mc2;
+	u32 intf_ctl, rgmii, value;
+
+	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
+	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
+	mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
+	intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
+	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
+
+	switch (pdata->phy_speed) {
+	case SPEED_10:
+		ENET_INTERFACE_MODE2_SET(&mc2, 1);
+		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
+		CFG_MACMODE_SET(&icm0, 0);
+		CFG_WAITASYNCRD_SET(&icm2, 500);
+		rgmii &= ~CFG_SPEED_1250;
+		break;
+	case SPEED_100:
+		ENET_INTERFACE_MODE2_SET(&mc2, 1);
+		intf_ctl &= ~ENET_GHD_MODE;
+		intf_ctl |= ENET_LHD_MODE;
+		CFG_MACMODE_SET(&icm0, 1);
+		CFG_WAITASYNCRD_SET(&icm2, 80);
+		rgmii &= ~CFG_SPEED_1250;
+		break;
+	default:
+		ENET_INTERFACE_MODE2_SET(&mc2, 2);
+		intf_ctl &= ~ENET_LHD_MODE;
+		intf_ctl |= ENET_GHD_MODE;
+		CFG_MACMODE_SET(&icm0, 2);
+		CFG_WAITASYNCRD_SET(&icm2, 0);
+		CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
+		CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
+		rgmii |= CFG_SPEED_1250;
+
+		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
+		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
+		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
+		break;
+	}
+
+	mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
+	xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
+	xgene_enet_configure_clock(pdata);
+
+	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
+	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
+}
+
+static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+}
+
+static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
+				       bool enable)
+{
+	u32 data;
+
+	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);
+
+	if (enable)
+		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+	else
+		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+
+	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
+}
+
+static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+
+	if (enable)
+		data |= TX_FLOW_EN;
+	else
+		data &= ~TX_FLOW_EN;
+
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
+
+	pdata->mac_ops->enable_tx_pause(pdata, enable);
+}
+
+static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+
+	if (enable)
+		data |= RX_FLOW_EN;
+	else
+		data &= ~RX_FLOW_EN;
+
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
+}
+
+static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
+{
+	u32 value;
+
+	if (!pdata->mdio_driver)
+		xgene_gmac_reset(pdata);
+
+	xgene_gmac_set_speed(pdata);
+	xgene_gmac_set_mac_addr(pdata);
+
+	/* Adjust MDC clock frequency */
+	value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
+	MGMT_CLOCK_SEL_SET(&value, 7);
+	xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
+
+	/* Enable drop if bufpool not available */
+	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
+	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);
+
+	/* Rtype should be copied from FP */
+	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
+
+	/* Configure HW pause frame generation */
+	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
+	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
+	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);
+
+	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
+	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);
+
+	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
+	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);
+
+	/* Rx-Tx traffic resume */
+	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
+
+	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
+	value &= ~TX_DV_GATE_EN0;
+	value &= ~RX_DV_GATE_EN0;
+	value |= RESUME_RX0;
+	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);
+
+	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
+}
+
+static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+				    u32 *rx, u32 *tx)
+{
+	u32 count;
+
+	xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
+	*rx = ICM_DROP_COUNT(count);
+	*tx = ECM_DROP_COUNT(count);
+	/* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */
+	xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
+{
+	u32 val = 0xffffffff;
+
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
+}
+
+static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+				  u32 dst_ring_num, u16 bufpool_id,
+				  u16 nxtbufpool_id)
+{
+	u32 cb;
+	u32 fpsel, nxtfpsel;
+
+	fpsel = xgene_enet_get_fpsel(bufpool_id);
+	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+
+	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
+	cb |= CFG_CLE_BYPASS_EN0;
+	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+	CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
+	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
+
+	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
+	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
+	CFG_CLE_FPSEL0_SET(&cb, fpsel);
+	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
+	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
+}
+
+static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
+}
+
+static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
+}
+
+static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
+}
+
+static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
+}
+
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+		return false;
+
+	if (ioread32(p->ring_csr_addr + SRST_ADDR))
+		return false;
+
+	return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
+	if (pdata->mdio_driver) {
+		xgene_enet_config_ring_if_assoc(pdata);
+		return 0;
+	}
+
+	if (dev->of_node) {
+		clk_prepare_enable(pdata->clk);
+		udelay(5);
+		clk_disable_unprepare(pdata->clk);
+		udelay(5);
+		clk_prepare_enable(pdata->clk);
+		udelay(5);
+	} else {
+#ifdef CONFIG_ACPI
+		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
+			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+					     "_RST", NULL, NULL);
+		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
+					 "_INI")) {
+			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+					     "_INI", NULL, NULL);
+		}
+#endif
+	}
+
+	xgene_enet_ecc_init(pdata);
+	xgene_enet_config_ring_if_assoc(pdata);
+
+	return 0;
+}
+
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+			     struct xgene_enet_desc_ring *ring)
+{
+	u32 addr, data;
+
+	if (xgene_enet_is_bufpool(ring->id)) {
+		addr = ENET_CFGSSQMIFPRESET_ADDR;
+		data = BIT(xgene_enet_get_fpsel(ring->id));
+	} else {
+		addr = ENET_CFGSSQMIWQRESET_ADDR;
+		data = BIT(xgene_enet_ring_bufnum(ring->id));
+	}
+
+	xgene_enet_wr_ring_if(pdata, addr, data);
+}
+
+static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	if (dev->of_node) {
+		if (!IS_ERR(pdata->clk))
+			clk_disable_unprepare(pdata->clk);
+	}
+}
+
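+/* Re-resolve flow control after autonegotiation: rebuild the local pause
+ * advertisement from the requested tx/rx settings, fold in the partner's
+ * Pause/Asym_Pause bits, and apply whatever mii_resolve_flowctrl_fdx()
+ * reports as the negotiated outcome.
+ */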
+static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	u16 lcladv, rmtadv = 0;
+	u32 rx_pause, tx_pause;
+	u8 flowctl = 0;
+
+	if (!phydev->duplex || !pdata->pause_autoneg)
+		return 0;
+
+	if (pdata->tx_pause)
+		flowctl |= FLOW_CTRL_TX;
+
+	if (pdata->rx_pause)
+		flowctl |= FLOW_CTRL_RX;
+
+	lcladv = mii_advertise_flowctrl(flowctl);
+
+	if (phydev->pause)
+		rmtadv = LPA_PAUSE_CAP;
+
+	if (phydev->asym_pause)
+		rmtadv |= LPA_PAUSE_ASYM;
+
+	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+	tx_pause = !!(flowctl & FLOW_CTRL_TX);
+	rx_pause = !!(flowctl & FLOW_CTRL_RX);
+
+	if (tx_pause != pdata->tx_pause) {
+		pdata->tx_pause = tx_pause;
+		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+	}
+
+	if (rx_pause != pdata->rx_pause) {
+		pdata->rx_pause = rx_pause;
+		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
+	}
+
+	return 0;
+}
+
+static void xgene_enet_adjust_link(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
+	struct phy_device *phydev = ndev->phydev;
+
+	if (phydev->link) {
+		if (pdata->phy_speed != phydev->speed) {
+			pdata->phy_speed = phydev->speed;
+			mac_ops->set_speed(pdata);
+			mac_ops->rx_enable(pdata);
+			mac_ops->tx_enable(pdata);
+			phy_print_status(phydev);
+		}
+
+		xgene_enet_flowctrl_cfg(ndev);
+	} else {
+		mac_ops->rx_disable(pdata);
+		mac_ops->tx_disable(pdata);
+		pdata->phy_speed = SPEED_UNKNOWN;
+		phy_print_status(phydev);
+	}
+}
+
+#ifdef CONFIG_ACPI
+static struct acpi_device *acpi_phy_find_device(struct device *dev)
+{
+	struct fwnode_reference_args args;
+	struct fwnode_handle *fw_node;
+	int status;
+
+	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
+	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
+						  &args);
+	if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
+		dev_dbg(dev, "No matching phy in ACPI table\n");
+		return NULL;
+	}
+
+	return to_acpi_device_node(args.fwnode);
+}
+#endif
+
+int xgene_enet_phy_connect(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device_node *np;
+	struct phy_device *phy_dev;
+	struct device *dev = &pdata->pdev->dev;
+	int i;
+
+	if (dev->of_node) {
+		for (i = 0; i < 2; i++) {
+			np = of_parse_phandle(dev->of_node, "phy-handle", i);
+			phy_dev = of_phy_connect(ndev, np,
+						 &xgene_enet_adjust_link,
+						 0, pdata->phy_mode);
+			of_node_put(np);
+			if (phy_dev)
+				break;
+		}
+
+		if (!phy_dev) {
+			netdev_err(ndev, "Could not connect to PHY\n");
+			return -ENODEV;
+		}
+	} else {
+#ifdef CONFIG_ACPI
+		struct acpi_device *adev = acpi_phy_find_device(dev);
+
+		if (adev)
+			phy_dev = adev->driver_data;
+		else
+			phy_dev = NULL;
+
+		if (!phy_dev ||
+		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+				       pdata->phy_mode)) {
+			netdev_err(ndev, "Could not connect to PHY\n");
+			return -ENODEV;
+		}
+#else
+		return -ENODEV;
+#endif
+	}
+
+	pdata->phy_speed = SPEED_UNKNOWN;
+	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
+			      ~SUPPORTED_100baseT_Half &
+			      ~SUPPORTED_1000baseT_Half;
+	phy_dev->supported |= SUPPORTED_Pause |
+			      SUPPORTED_Asym_Pause;
+	phy_dev->advertising = phy_dev->supported;
+
+	return 0;
+}
+
+static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
+				  struct mii_bus *mdio)
+{
+	struct device *dev = &pdata->pdev->dev;
+	struct net_device *ndev = pdata->ndev;
+	struct phy_device *phy;
+	struct device_node *child_np;
+	struct device_node *mdio_np = NULL;
+	u32 phy_addr;
+	int ret;
+
+	if (dev->of_node) {
+		for_each_child_of_node(dev->of_node, child_np) {
+			if (of_device_is_compatible(child_np,
+						    "apm,xgene-mdio")) {
+				mdio_np = child_np;
+				break;
+			}
+		}
+
+		if (!mdio_np) {
+			netdev_dbg(ndev, "No mdio node in the dts\n");
+			return -ENXIO;
+		}
+
+		return of_mdiobus_register(mdio, mdio_np);
+	}
+
+	/* Mask out all PHYs from auto probing. */
+	mdio->phy_mask = ~0;
+
+	/* Register the MDIO bus */
+	ret = mdiobus_register(mdio);
+	if (ret)
+		return ret;
+
+	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
+	if (ret)
+		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
+	if (ret)
+		return -EINVAL;
+
+	phy = xgene_enet_phy_register(mdio, phy_addr);
+	if (!phy)
+		return -EIO;
+
+	return ret;
+}
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	struct mii_bus *mdio_bus;
+	int ret;
+
+	mdio_bus = mdiobus_alloc();
+	if (!mdio_bus)
+		return -ENOMEM;
+
+	mdio_bus->name = "APM X-Gene MDIO bus";
+	mdio_bus->read = xgene_mdio_rgmii_read;
+	mdio_bus->write = xgene_mdio_rgmii_write;
+	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
+		 ndev->name);
+
+	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
+	mdio_bus->parent = &pdata->pdev->dev;
+
+	ret = xgene_mdiobus_register(pdata, mdio_bus);
+	if (ret) {
+		netdev_err(ndev, "Failed to register MDIO bus\n");
+		mdiobus_free(mdio_bus);
+		return ret;
+	}
+	pdata->mdio_bus = mdio_bus;
+
+	ret = xgene_enet_phy_connect(ndev);
+	if (ret)
+		xgene_enet_mdio_remove(pdata);
+
+	return ret;
+}
+
+void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
+}
+
+void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
+
+	mdiobus_unregister(pdata->mdio_bus);
+	mdiobus_free(pdata->mdio_bus);
+	pdata->mdio_bus = NULL;
+}
+
+const struct xgene_mac_ops xgene_gmac_ops = {
+	.init = xgene_gmac_init,
+	.reset = xgene_gmac_reset,
+	.rx_enable = xgene_gmac_rx_enable,
+	.tx_enable = xgene_gmac_tx_enable,
+	.rx_disable = xgene_gmac_rx_disable,
+	.tx_disable = xgene_gmac_tx_disable,
+	.get_drop_cnt = xgene_gmac_get_drop_cnt,
+	.set_speed = xgene_gmac_set_speed,
+	.set_mac_addr = xgene_gmac_set_mac_addr,
+	.set_framesize = xgene_enet_set_frame_size,
+	.enable_tx_pause = xgene_gmac_enable_tx_pause,
+	.flowctl_tx     = xgene_gmac_flowctl_tx,
+	.flowctl_rx     = xgene_gmac_flowctl_rx,
+};
+
+const struct xgene_port_ops xgene_gport_ops = {
+	.reset = xgene_enet_reset,
+	.clear = xgene_enet_clear,
+	.cle_bypass = xgene_enet_cle_bypass,
+	.shutdown = xgene_gport_shutdown,
+};
+
+struct xgene_ring_ops xgene_ring1_ops = {
+	.num_ring_config = NUM_RING_CONFIG,
+	.num_ring_id_shift = 6,
+	.setup = xgene_enet_setup_ring,
+	.clear = xgene_enet_clear_ring,
+	.wr_cmd = xgene_enet_wr_cmd,
+	.len = xgene_enet_ring_len,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
new file mode 100644
index 0000000..5d3e18d
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -0,0 +1,450 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_HW_H__
+#define __XGENE_ENET_HW_H__
+
+#include "xgene_enet_main.h"
+
+struct xgene_enet_pdata;
+struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
+
+/* Clears and then sets the bit-field [start + len - 1 : start] of *dst */
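+/* e.g. xgene_set_bits(&v, 0x3, 4, 2) writes 0b11 into bits [5:4] of v */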
+static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
+{
+	u32 end = start + len - 1;
+	u32 mask = GENMASK(end, start);
+
+	*dst &= ~mask;
+	*dst |= (val << start) & mask;
+}
+
+static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
+{
+	return (val & GENMASK(end, start)) >> start;
+}
+
+enum xgene_enet_rm {
+	RM0,
+	RM1,
+	RM3 = 3
+};
+
+#define CSR_RING_ID		0x0008
+#define OVERWRITE		BIT(31)
+#define IS_BUFFER_POOL		BIT(20)
+#define PREFETCH_BUF_EN		BIT(21)
+#define CSR_RING_ID_BUF		0x000c
+#define CSR_PBM_COAL		0x0014
+#define CSR_PBM_CTICK0		0x0018
+#define CSR_PBM_CTICK1		0x001c
+#define CSR_PBM_CTICK2		0x0020
+#define CSR_PBM_CTICK3		0x0024
+#define CSR_THRESHOLD0_SET1	0x0030
+#define CSR_THRESHOLD1_SET1	0x0034
+#define CSR_RING_NE_INT_MODE	0x017c
+#define CSR_RING_CONFIG		0x006c
+#define CSR_RING_WR_BASE	0x0070
+#define NUM_RING_CONFIG		5
+#define BUFPOOL_MODE		3
+#define INC_DEC_CMD_ADDR	0x002c
+#define UDP_HDR_SIZE		2
+#define BUF_LEN_CODE_2K		0x5000
+
+#define CREATE_MASK(pos, len)		GENMASK((pos)+(len)-1, (pos))
+#define CREATE_MASK_ULL(pos, len)	GENMASK_ULL((pos)+(len)-1, (pos))
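+/* e.g. CREATE_MASK(5, 27) expands to GENMASK(31, 5), i.e. bits [31:5] */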
+
+/* Empty slot soft signature */
+#define EMPTY_SLOT_INDEX	1
+#define EMPTY_SLOT		~0ULL
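+/* Software stamps EMPTY_SLOT into 64-bit word EMPTY_SLOT_INDEX of a
+ * descriptor to mark the slot free; hardware-written descriptors are
+ * assumed never to carry that value there.
+ */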
+
+#define WORK_DESC_SIZE		32
+#define BUFPOOL_DESC_SIZE	16
+
+#define RING_OWNER_MASK		GENMASK(9, 6)
+#define RING_BUFNUM_MASK	GENMASK(5, 0)
+
+#define SELTHRSH_POS		3
+#define SELTHRSH_LEN		3
+#define RINGADDRL_POS		5
+#define RINGADDRL_LEN		27
+#define RINGADDRH_POS		0
+#define RINGADDRH_LEN		7
+#define RINGSIZE_POS		23
+#define RINGSIZE_LEN		3
+#define RINGTYPE_POS		19
+#define RINGTYPE_LEN		2
+#define RINGMODE_POS		20
+#define RINGMODE_LEN		3
+#define RECOMTIMEOUTL_POS	28
+#define RECOMTIMEOUTL_LEN	4
+#define RECOMTIMEOUTH_POS	0
+#define RECOMTIMEOUTH_LEN	3
+#define NUMMSGSINQ_POS		1
+#define NUMMSGSINQ_LEN		16
+#define ACCEPTLERR		BIT(19)
+#define QCOHERENT		BIT(4)
+#define RECOMBBUF		BIT(27)
+
+#define MAC_OFFSET			0x30
+#define OFFSET_4			0x04
+#define OFFSET_8			0x08
+
+#define BLOCK_ETH_CSR_OFFSET		0x2000
+#define BLOCK_ETH_CLE_CSR_OFFSET	0x6000
+#define BLOCK_ETH_RING_IF_OFFSET	0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET	0xc000
+#define BLOCK_ETH_DIAG_CSR_OFFSET	0xD000
+#define BLOCK_ETH_MAC_OFFSET		0x0000
+#define BLOCK_ETH_STATS_OFFSET		0x0000
+#define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
+
+#define CLKEN_ADDR			0xc208
+#define SRST_ADDR			0xc200
+
+#define MAC_ADDR_REG_OFFSET		0x00
+#define MAC_COMMAND_REG_OFFSET		0x04
+#define MAC_WRITE_REG_OFFSET		0x08
+#define MAC_READ_REG_OFFSET		0x0c
+#define MAC_COMMAND_DONE_REG_OFFSET	0x10
+
+#define STAT_ADDR_REG_OFFSET            0x14
+#define STAT_COMMAND_REG_OFFSET         0x18
+#define STAT_WRITE_REG_OFFSET           0x1c
+#define STAT_READ_REG_OFFSET            0x20
+#define STAT_COMMAND_DONE_REG_OFFSET    0x24
+
+#define PCS_ADDR_REG_OFFSET		0x00
+#define PCS_COMMAND_REG_OFFSET		0x04
+#define PCS_WRITE_REG_OFFSET		0x08
+#define PCS_READ_REG_OFFSET		0x0c
+#define PCS_COMMAND_DONE_REG_OFFSET	0x10
+
+#define MII_MGMT_CONFIG_ADDR		0x20
+#define MII_MGMT_COMMAND_ADDR		0x24
+#define MII_MGMT_ADDRESS_ADDR		0x28
+#define MII_MGMT_CONTROL_ADDR		0x2c
+#define MII_MGMT_STATUS_ADDR		0x30
+#define MII_MGMT_INDICATORS_ADDR	0x34
+
+#define BUSY_MASK			BIT(0)
+#define READ_CYCLE_MASK			BIT(0)
+#define PHY_CONTROL_SET(dst, val)	xgene_set_bits(dst, val, 0, 16)
+
+#define ENET_SPARE_CFG_REG_ADDR		0x0750
+#define RSIF_CONFIG_REG_ADDR		0x0010
+#define RSIF_RAM_DBG_REG0_ADDR		0x0048
+#define RGMII_REG_0_ADDR		0x07e0
+#define CFG_LINK_AGGR_RESUME_0_ADDR	0x07c8
+#define DEBUG_REG_ADDR			0x0700
+#define CFG_BYPASS_ADDR			0x0294
+#define CLE_BYPASS_REG0_0_ADDR		0x0490
+#define CLE_BYPASS_REG1_0_ADDR		0x0494
+#define CFG_RSIF_FPBUFF_TIMEOUT_EN	BIT(31)
+#define RESUME_TX			BIT(0)
+#define CFG_SPEED_1250			BIT(24)
+#define TX_PORT0			BIT(0)
+#define CFG_BYPASS_UNISEC_TX		BIT(2)
+#define CFG_BYPASS_UNISEC_RX		BIT(1)
+#define CFG_CLE_BYPASS_EN0		BIT(31)
+#define CFG_TXCLK_MUXSEL0_SET(dst, val)	xgene_set_bits(dst, val, 29, 3)
+#define CFG_RXCLK_MUXSEL0_SET(dst, val)	xgene_set_bits(dst, val, 26, 3)
+
+#define CFG_CLE_IP_PROTOCOL0_SET(dst, val)	xgene_set_bits(dst, val, 16, 2)
+#define CFG_CLE_IP_HDR_LEN_SET(dst, val)	xgene_set_bits(dst, val, 8, 5)
+#define CFG_CLE_DSTQID0_SET(dst, val)		xgene_set_bits(dst, val, 0, 12)
+#define CFG_CLE_FPSEL0_SET(dst, val)		xgene_set_bits(dst, val, 16, 4)
+#define CFG_CLE_NXTFPSEL0_SET(dst, val)		xgene_set_bits(dst, val, 20, 4)
+#define CFG_MACMODE_SET(dst, val)		xgene_set_bits(dst, val, 18, 2)
+#define CFG_WAITASYNCRD_SET(dst, val)		xgene_set_bits(dst, val, 0, 16)
+#define CFG_CLE_DSTQID0(val)		((val) & GENMASK(11, 0))
+#define CFG_CLE_FPSEL0(val)		(((val) << 16) & GENMASK(19, 16))
+#define CSR_ECM_CFG_0_ADDR		0x0220
+#define CSR_ECM_CFG_1_ADDR		0x0224
+#define CSR_MULTI_DPF0_ADDR		0x0230
+#define RXBUF_PAUSE_THRESH		0x0534
+#define RXBUF_PAUSE_OFF_THRESH		0x0540
+#define DEF_PAUSE_THRES			0x7d
+#define DEF_PAUSE_OFF_THRES		0x6d
+#define DEF_QUANTA			0x8000
+#define NORM_PAUSE_OPCODE		0x0001
+#define PAUSE_XON_EN			BIT(30)
+#define MULTI_DPF_AUTOCTRL		BIT(28)
+#define CFG_CLE_NXTFPSEL0(val)		(((val) << 20) & GENMASK(23, 20))
+#define ICM_CONFIG0_REG_0_ADDR		0x0400
+#define ICM_CONFIG2_REG_0_ADDR		0x0410
+#define ECM_CONFIG0_REG_0_ADDR		0x0500
+#define ECM_CONFIG0_REG_1_ADDR		0x0504
+#define ICM_ECM_DROP_COUNT_REG0_ADDR	0x0508
+#define ICM_ECM_DROP_COUNT_REG1_ADDR	0x050c
+#define RX_DV_GATE_REG_0_ADDR		0x05fc
+#define TX_DV_GATE_EN0			BIT(2)
+#define RX_DV_GATE_EN0			BIT(1)
+#define RESUME_RX0			BIT(0)
+#define ENET_CFGSSQMIFPRESET_ADDR		0x14
+#define ENET_CFGSSQMIWQRESET_ADDR		0x1c
+#define ENET_CFGSSQMIWQASSOC_ADDR		0xe0
+#define ENET_CFGSSQMIFPQASSOC_ADDR		0xdc
+#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR	0xf0
+#define ENET_CFGSSQMIQMLITEWQASSOC_ADDR		0xf4
+#define ENET_CFG_MEM_RAM_SHUTDOWN_ADDR		0x70
+#define ENET_BLOCK_MEM_RDY_ADDR			0x74
+#define MAC_CONFIG_1_ADDR			0x00
+#define MAC_CONFIG_2_ADDR			0x04
+#define MAX_FRAME_LEN_ADDR			0x10
+#define INTERFACE_CONTROL_ADDR			0x38
+#define STATION_ADDR0_ADDR			0x40
+#define STATION_ADDR1_ADDR			0x44
+#define PHY_ADDR_SET(dst, val)			xgene_set_bits(dst, val, 8, 5)
+#define REG_ADDR_SET(dst, val)			xgene_set_bits(dst, val, 0, 5)
+#define ENET_INTERFACE_MODE2_SET(dst, val)	xgene_set_bits(dst, val, 8, 2)
+#define MGMT_CLOCK_SEL_SET(dst, val)		xgene_set_bits(dst, val, 0, 3)
+#define SOFT_RESET1			BIT(31)
+#define TX_EN				BIT(0)
+#define RX_EN				BIT(2)
+#define TX_FLOW_EN			BIT(4)
+#define RX_FLOW_EN			BIT(5)
+#define ENET_LHD_MODE			BIT(25)
+#define ENET_GHD_MODE			BIT(26)
+#define FULL_DUPLEX2			BIT(0)
+#define PAD_CRC				BIT(2)
+#define LENGTH_CHK			BIT(4)
+
+#define TR64_ADDR	0x20
+#define TR127_ADDR	0x21
+#define TR255_ADDR	0x22
+#define TR511_ADDR	0x23
+#define TR1K_ADDR	0x24
+#define TRMAX_ADDR	0x25
+#define TRMGV_ADDR	0x26
+
+#define RFCS_ADDR	0x29
+#define RMCA_ADDR	0x2a
+#define RBCA_ADDR	0x2b
+#define RXCF_ADDR	0x2c
+#define RXPF_ADDR	0x2d
+#define RXUO_ADDR	0x2e
+#define RALN_ADDR	0x2f
+#define RFLR_ADDR	0x30
+#define RCDE_ADDR	0x31
+#define RCSE_ADDR	0x32
+#define RUND_ADDR	0x33
+#define ROVR_ADDR	0x34
+#define RFRG_ADDR	0x35
+#define RJBR_ADDR	0x36
+#define RDRP_ADDR	0x37
+
+#define TMCA_ADDR	0x3a
+#define TBCA_ADDR	0x3b
+#define TXPF_ADDR	0x3c
+#define TDFR_ADDR	0x3d
+#define TEDF_ADDR	0x3e
+#define TSCL_ADDR	0x3f
+#define TMCL_ADDR	0x40
+#define TLCL_ADDR	0x41
+#define TXCL_ADDR	0x42
+#define TNCL_ADDR	0x43
+#define TPFH_ADDR	0x44
+#define TDRP_ADDR	0x45
+#define TJBR_ADDR	0x46
+#define TFCS_ADDR	0x47
+#define TXCF_ADDR	0x48
+#define TOVR_ADDR	0x49
+#define TUND_ADDR	0x4a
+#define TFRG_ADDR	0x4b
+#define DUMP_ADDR	0x27
+
+#define ECM_DROP_COUNT(src)	xgene_get_bits(src, 0, 15)
+#define ICM_DROP_COUNT(src)	xgene_get_bits(src, 16, 31)
+
+#define TSO_IPPROTO_TCP			1
+
+#define USERINFO_POS			0
+#define USERINFO_LEN			32
+#define FPQNUM_POS			32
+#define FPQNUM_LEN			12
+#define ELERR_POS                       46
+#define ELERR_LEN                       2
+#define NV_POS				50
+#define NV_LEN				1
+#define LL_POS				51
+#define LL_LEN				1
+#define LERR_POS			60
+#define LERR_LEN			3
+#define STASH_POS			52
+#define STASH_LEN			2
+#define BUFDATALEN_POS			48
+#define BUFDATALEN_LEN			15
+#define DATAADDR_POS			0
+#define DATAADDR_LEN			42
+#define COHERENT_POS			63
+#define HENQNUM_POS			48
+#define HENQNUM_LEN			12
+#define TYPESEL_POS			44
+#define TYPESEL_LEN			4
+#define ETHHDR_POS			12
+#define ETHHDR_LEN			8
+#define IC_POS				35	/* Insert CRC */
+#define TCPHDR_POS			0
+#define TCPHDR_LEN			6
+#define IPHDR_POS			6
+#define IPHDR_LEN			6
+#define MSS_POS				20
+#define MSS_LEN				2
+#define EC_POS				22	/* Enable checksum */
+#define EC_LEN				1
+#define ET_POS				23	/* Enable TSO */
+#define IS_POS				24	/* IP protocol select */
+#define IS_LEN				1
+#define TYPE_ETH_WORK_MESSAGE_POS	44
+#define LL_BYTES_MSB_POS		56
+#define LL_BYTES_MSB_LEN		8
+#define LL_BYTES_LSB_POS		48
+#define LL_BYTES_LSB_LEN		12
+#define LL_LEN_POS			48
+#define LL_LEN_LEN			8
+#define DATALEN_MASK			GENMASK(11, 0)
+
+#define LAST_BUFFER			(0x7800ULL << BUFDATALEN_POS)
+
+#define TSO_MSS0_POS			0
+#define TSO_MSS0_LEN			14
+#define TSO_MSS1_POS			16
+#define TSO_MSS1_LEN			14
+
+struct xgene_enet_raw_desc {
+	__le64 m0;
+	__le64 m1;
+	__le64 m2;
+	__le64 m3;
+};
+
+struct xgene_enet_raw_desc16 {
+	__le64 m0;
+	__le64 m1;
+};
+
+static inline void xgene_enet_mark_desc_slot_empty(void *desc_slot_ptr)
+{
+	__le64 *desc_slot = desc_slot_ptr;
+
+	desc_slot[EMPTY_SLOT_INDEX] = cpu_to_le64(EMPTY_SLOT);
+}
+
+static inline bool xgene_enet_is_desc_slot_empty(void *desc_slot_ptr)
+{
+	__le64 *desc_slot = desc_slot_ptr;
+
+	return (desc_slot[EMPTY_SLOT_INDEX] == cpu_to_le64(EMPTY_SLOT));
+}
+
+enum xgene_enet_ring_cfgsize {
+	RING_CFGSIZE_512B,
+	RING_CFGSIZE_2KB,
+	RING_CFGSIZE_16KB,
+	RING_CFGSIZE_64KB,
+	RING_CFGSIZE_512KB,
+	RING_CFGSIZE_INVALID
+};
+
+enum xgene_enet_ring_type {
+	RING_DISABLED,
+	RING_REGULAR,
+	RING_BUFPOOL
+};
+
+enum xgene_ring_owner {
+	RING_OWNER_ETH0,
+	RING_OWNER_ETH1,
+	RING_OWNER_CPU = 15,
+	RING_OWNER_INVALID
+};
+
+enum xgene_enet_ring_bufnum {
+	RING_BUFNUM_REGULAR = 0x0,
+	RING_BUFNUM_BUFPOOL = 0x20,
+	RING_BUFNUM_INVALID
+};
+
+enum xgene_enet_err_code {
+	HBF_READ_DATA = 3,
+	HBF_LL_READ = 4,
+	BAD_WORK_MSG = 6,
+	BUFPOOL_TIMEOUT = 15,
+	INGRESS_CRC = 16,
+	INGRESS_CHECKSUM = 17,
+	INGRESS_TRUNC_FRAME = 18,
+	INGRESS_PKT_LEN = 19,
+	INGRESS_PKT_UNDER = 20,
+	INGRESS_FIFO_OVERRUN = 21,
+	INGRESS_CHECKSUM_COMPUTE = 26,
+	ERR_CODE_INVALID
+};
+
+static inline enum xgene_ring_owner xgene_enet_ring_owner(u16 id)
+{
+	return (id & RING_OWNER_MASK) >> 6;
+}
+
+static inline u8 xgene_enet_ring_bufnum(u16 id)
+{
+	return id & RING_BUFNUM_MASK;
+}
+
+static inline bool xgene_enet_is_bufpool(u16 id)
+{
+	return (id & RING_BUFNUM_MASK) >= RING_BUFNUM_BUFPOOL;
+}
+
+static inline u8 xgene_enet_get_fpsel(u16 id)
+{
+	if (xgene_enet_is_bufpool(id))
+		return xgene_enet_ring_bufnum(id) - RING_BUFNUM_BUFPOOL;
+
+	return 0;
+}
+
+static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
+{
+	bool is_bufpool = xgene_enet_is_bufpool(id);
+
+	return (is_bufpool) ? size / BUFPOOL_DESC_SIZE :
+		      size / WORK_DESC_SIZE;
+}
+
+void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
+			    enum xgene_enet_err_code status);
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
+void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
+int xgene_enet_phy_connect(struct net_device *ndev);
+void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata);
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr);
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr,
+		       u32 wr_data);
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr);
+
+extern const struct xgene_mac_ops xgene_gmac_ops;
+extern const struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
+
+#endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
new file mode 100644
index 0000000..3b889ef
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -0,0 +1,2202 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
+
+#define RES_ENET_CSR	0
+#define RES_RING_CSR	1
+#define RES_RING_CMD	2
+
+static const struct of_device_id xgene_enet_of_match[];
+static const struct acpi_device_id xgene_enet_acpi_match[];
+
+static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
+{
+	struct xgene_enet_raw_desc16 *raw_desc;
+	int i;
+
+	if (!buf_pool)
+		return;
+
+	for (i = 0; i < buf_pool->slots; i++) {
+		raw_desc = &buf_pool->raw_desc16[i];
+
+		/* Hardware expects descriptor in little endian format */
+		raw_desc->m0 = cpu_to_le64(i |
+				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
+				SET_VAL(STASH, 3));
+	}
+}
+
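+/* Decode the hardware BUFDATALEN field: 0x7800 marks an empty buffer;
+ * otherwise bit 14 and bits [13:12] select the buffer-size class, and
+ * a zero length field encodes a full buffer (16K, 4K or 2K).
+ */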
+static u16 xgene_enet_get_data_len(u64 bufdatalen)
+{
+	u16 hw_len, mask;
+
+	hw_len = GET_VAL(BUFDATALEN, bufdatalen);
+
+	if (unlikely(hw_len == 0x7800)) {
+		return 0;
+	} else if (!(hw_len & BIT(14))) {
+		mask = GENMASK(13, 0);
+		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
+	} else if (!(hw_len & GENMASK(13, 12))) {
+		mask = GENMASK(11, 0);
+		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
+	} else {
+		mask = GENMASK(11, 0);
+		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
+	}
+}
+
+static u16 xgene_enet_set_data_len(u32 size)
+{
+	u16 hw_len;
+
+	hw_len = (size == SIZE_4K) ? BIT(14) : 0;
+
+	return hw_len;
+}
+
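+/* Refill the jumbo-frame page pool: each descriptor slot is backed by
+ * one freshly allocated page, DMA-mapped for receive.
+ */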
+static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
+				      u32 nbuf)
+{
+	struct xgene_enet_raw_desc16 *raw_desc;
+	struct xgene_enet_pdata *pdata;
+	struct net_device *ndev;
+	dma_addr_t dma_addr;
+	struct device *dev;
+	struct page *page;
+	u32 slots, tail;
+	u16 hw_len;
+	int i;
+
+	if (unlikely(!buf_pool))
+		return 0;
+
+	ndev = buf_pool->ndev;
+	pdata = netdev_priv(ndev);
+	dev = ndev_to_dev(ndev);
+	slots = buf_pool->slots - 1;
+	tail = buf_pool->tail;
+
+	for (i = 0; i < nbuf; i++) {
+		raw_desc = &buf_pool->raw_desc16[tail];
+
+		page = dev_alloc_page();
+		if (unlikely(!page))
+			return -ENOMEM;
+
+		dma_addr = dma_map_page(dev, page, 0,
+					PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev, dma_addr))) {
+			put_page(page);
+			return -ENOMEM;
+		}
+
+		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
+		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+					   SET_VAL(BUFDATALEN, hw_len) |
+					   SET_BIT(COHERENT));
+
+		buf_pool->frag_page[tail] = page;
+		tail = (tail + 1) & slots;
+	}
+
+	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
+	buf_pool->tail = tail;
+
+	return 0;
+}
+
+static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
+				     u32 nbuf)
+{
+	struct sk_buff *skb;
+	struct xgene_enet_raw_desc16 *raw_desc;
+	struct xgene_enet_pdata *pdata;
+	struct net_device *ndev;
+	struct device *dev;
+	dma_addr_t dma_addr;
+	u32 tail = buf_pool->tail;
+	u32 slots = buf_pool->slots - 1;
+	u16 bufdatalen, len;
+	int i;
+
+	ndev = buf_pool->ndev;
+	dev = ndev_to_dev(buf_pool->ndev);
+	pdata = netdev_priv(ndev);
+
+	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
+	len = XGENE_ENET_STD_MTU;
+
+	for (i = 0; i < nbuf; i++) {
+		raw_desc = &buf_pool->raw_desc16[tail];
+
+		skb = netdev_alloc_skb_ip_align(ndev, len);
+		if (unlikely(!skb))
+			return -ENOMEM;
+
+		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			netdev_err(ndev, "DMA mapping error\n");
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		buf_pool->rx_skb[tail] = skb;
+
+		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+					   SET_VAL(BUFDATALEN, bufdatalen) |
+					   SET_BIT(COHERENT));
+		tail = (tail + 1) & slots;
+	}
+
+	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
+	buf_pool->tail = tail;
+
+	return 0;
+}
+
+static u8 xgene_enet_hdr_len(const void *data)
+{
+	const struct ethhdr *eth = data;
+
+	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
+{
+	struct device *dev = ndev_to_dev(buf_pool->ndev);
+	struct xgene_enet_raw_desc16 *raw_desc;
+	dma_addr_t dma_addr;
+	int i;
+
+	/* Free up the buffers held by hardware */
+	for (i = 0; i < buf_pool->slots; i++) {
+		if (buf_pool->rx_skb[i]) {
+			dev_kfree_skb_any(buf_pool->rx_skb[i]);
+
+			raw_desc = &buf_pool->raw_desc16[i];
+			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
+			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
+					 DMA_FROM_DEVICE);
+		}
+	}
+}
+
+static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
+{
+	struct device *dev = ndev_to_dev(buf_pool->ndev);
+	dma_addr_t dma_addr;
+	struct page *page;
+	int i;
+
+	/* Free up the buffers held by hardware */
+	for (i = 0; i < buf_pool->slots; i++) {
+		page = buf_pool->frag_page[i];
+		if (page) {
+			dma_addr = buf_pool->frag_dma_addr[i];
+			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+			put_page(page);
+		}
+	}
+}
+
+static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
+{
+	struct xgene_enet_desc_ring *rx_ring = data;
+
+	if (napi_schedule_prep(&rx_ring->napi)) {
+		disable_irq_nosync(irq);
+		__napi_schedule(&rx_ring->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
+				    struct xgene_enet_raw_desc *raw_desc)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
+	struct sk_buff *skb;
+	struct device *dev;
+	skb_frag_t *frag;
+	dma_addr_t *frag_dma_addr;
+	u16 skb_index;
+	u8 mss_index;
+	u8 status;
+	int i;
+
+	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
+	skb = cp_ring->cp_skb[skb_index];
+	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
+
+	dev = ndev_to_dev(cp_ring->ndev);
+	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
+			 skb_headlen(skb),
+			 DMA_TO_DEVICE);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
+			       DMA_TO_DEVICE);
+	}
+
+	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
+		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
+		spin_lock(&pdata->mss_lock);
+		pdata->mss_refcnt[mss_index]--;
+		spin_unlock(&pdata->mss_lock);
+	}
+
+	/* Checking for error */
+	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+	if (unlikely(status > 2)) {
+		cp_ring->tx_dropped++;
+		cp_ring->tx_errors++;
+	}
+
+	if (likely(skb)) {
+		dev_kfree_skb_any(skb);
+	} else {
+		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
+	}
+
+	return 0;
+}
+
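+/* The MAC provides NUM_MSS_REG shared MSS registers and each TSO packet
+ * references one by index.  Reuse a slot whose programmed MSS matches,
+ * else claim a slot whose refcount has dropped to zero; -EBUSY means
+ * every slot is pinned by in-flight packets.
+ */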
+static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	int mss_index = -EBUSY;
+	int i;
+
+	spin_lock(&pdata->mss_lock);
+
+	/* Reuse the slot if MSS matches */
+	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
+		if (pdata->mss[i] == mss) {
+			pdata->mss_refcnt[i]++;
+			mss_index = i;
+		}
+	}
+
+	/* Overwrite the slot with ref_count = 0 */
+	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
+		if (!pdata->mss_refcnt[i]) {
+			pdata->mss_refcnt[i]++;
+			pdata->mac_ops->set_mss(pdata, mss, i);
+			pdata->mss[i] = mss;
+			mss_index = i;
+		}
+	}
+
+	spin_unlock(&pdata->mss_lock);
+
+	return mss_index;
+}
+
+static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
+{
+	struct net_device *ndev = skb->dev;
+	struct iphdr *iph;
+	u8 l3hlen = 0, l4hlen = 0;
+	u8 ethhdr, proto = 0, csum_enable = 0;
+	u32 hdr_len, mss = 0;
+	u32 i, len, nr_frags;
+	int mss_index;
+
+	ethhdr = xgene_enet_hdr_len(skb->data);
+
+	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
+	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
+		goto out;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
+		goto out;
+
+	iph = ip_hdr(skb);
+	if (unlikely(ip_is_fragment(iph)))
+		goto out;
+
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		l4hlen = tcp_hdrlen(skb) >> 2;
+		csum_enable = 1;
+		proto = TSO_IPPROTO_TCP;
+		if (ndev->features & NETIF_F_TSO) {
+			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
+			mss = skb_shinfo(skb)->gso_size;
+
+			if (skb_is_nonlinear(skb)) {
+				len = skb_headlen(skb);
+				nr_frags = skb_shinfo(skb)->nr_frags;
+
+				for (i = 0; i < 2 && i < nr_frags; i++)
+					len += skb_shinfo(skb)->frags[i].size;
+
+				/* HW requires the header within the first 3 buffers */
+				if (unlikely(hdr_len > len)) {
+					if (skb_linearize(skb))
+						return 0;
+				}
+			}
+
+			if (!mss || ((skb->len - hdr_len) <= mss))
+				goto out;
+
+			mss_index = xgene_enet_setup_mss(ndev, mss);
+			if (unlikely(mss_index < 0))
+				return -EBUSY;
+
+			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
+		}
+	} else if (iph->protocol == IPPROTO_UDP) {
+		l4hlen = UDP_HDR_SIZE;
+		csum_enable = 1;
+	}
+out:
+	l3hlen = ip_hdrlen(skb) >> 2;
+	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
+		    SET_VAL(IPHDR, l3hlen) |
+		    SET_VAL(ETHHDR, ethhdr) |
+		    SET_VAL(EC, csum_enable) |
+		    SET_VAL(IS, proto) |
+		    SET_BIT(IC) |
+		    SET_BIT(TYPE_ETH_WORK_MESSAGE);
+
+	return 0;
+}
+
+static u16 xgene_enet_encode_len(u16 len)
+{
+	return (len == BUFLEN_16K) ? 0 : len;
+}
+
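+/* The two 64-bit words of each 16-byte descriptor chunk are stored
+ * swapped, hence the idx ^ 1 addressing used throughout.
+ */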
+static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
+{
+	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
+				    SET_VAL(BUFDATALEN, len));
+}
+
+static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
+{
+	__le64 *exp_bufs;
+
+	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
+	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
+	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
+
+	return exp_bufs;
+}
+
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
+{
+	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
+}
+
+static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
+				    struct sk_buff *skb)
+{
+	struct device *dev = ndev_to_dev(tx_ring->ndev);
+	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
+	struct xgene_enet_raw_desc *raw_desc;
+	__le64 *exp_desc = NULL, *exp_bufs = NULL;
+	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
+	skb_frag_t *frag;
+	u16 tail = tx_ring->tail;
+	u64 hopinfo = 0;
+	u32 len, hw_len;
+	u8 ll = 0, nv = 0, idx = 0;
+	bool split = false;
+	u32 size, offset, ell_bytes = 0;
+	u32 i, fidx, nr_frags, count = 1;
+	int ret;
+
+	raw_desc = &tx_ring->raw_desc[tail];
+	tail = (tail + 1) & (tx_ring->slots - 1);
+	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+	ret = xgene_enet_work_msg(skb, &hopinfo);
+	if (ret)
+		return ret;
+
+	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
+				   hopinfo);
+
+	len = skb_headlen(skb);
+	hw_len = xgene_enet_encode_len(len);
+
+	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_addr)) {
+		netdev_err(tx_ring->ndev, "DMA mapping error\n");
+		return -EINVAL;
+	}
+
+	/* Hardware expects descriptor in little endian format */
+	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+				   SET_VAL(BUFDATALEN, hw_len) |
+				   SET_BIT(COHERENT));
+
+	if (!skb_is_nonlinear(skb))
+		goto out;
+
+	/* scatter gather */
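+	/* The second ring slot holds up to four more buffer pointers;
+	 * fragments beyond those spill into an out-of-line exp_bufs
+	 * array referenced through the LL (linked-list) entry.
+	 */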
+	nv = 1;
+	exp_desc = (void *)&tx_ring->raw_desc[tail];
+	tail = (tail + 1) & (tx_ring->slots - 1);
+	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (i = nr_frags; i < 4; i++)
+		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
+
+	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
+
+	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
+		if (!split) {
+			frag = &skb_shinfo(skb)->frags[fidx];
+			size = skb_frag_size(frag);
+			offset = 0;
+
+			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
+						     DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, pbuf_addr))
+				return -EINVAL;
+
+			frag_dma_addr[fidx] = pbuf_addr;
+			fidx++;
+
+			if (size > BUFLEN_16K)
+				split = true;
+		}
+
+		if (size > BUFLEN_16K) {
+			len = BUFLEN_16K;
+			size -= BUFLEN_16K;
+		} else {
+			len = size;
+			split = false;
+		}
+
+		dma_addr = pbuf_addr + offset;
+		hw_len = xgene_enet_encode_len(len);
+
+		switch (i) {
+		case 0:
+		case 1:
+		case 2:
+			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
+			break;
+		case 3:
+			if (split || (fidx != nr_frags)) {
+				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
+				xgene_set_addr_len(exp_bufs, idx, dma_addr,
+						   hw_len);
+				idx++;
+				ell_bytes += len;
+			} else {
+				xgene_set_addr_len(exp_desc, i, dma_addr,
+						   hw_len);
+			}
+			break;
+		default:
+			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
+			idx++;
+			ell_bytes += len;
+			break;
+		}
+
+		if (split)
+			offset += BUFLEN_16K;
+	}
+	count++;
+
+	if (idx) {
+		ll = 1;
+		dma_addr = dma_map_single(dev, exp_bufs,
+					  sizeof(u64) * MAX_EXP_BUFFS,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+		i = ell_bytes >> LL_BYTES_LSB_LEN;
+		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+					  SET_VAL(LL_BYTES_MSB, i) |
+					  SET_VAL(LL_LEN, idx));
+		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
+	}
+
+out:
+	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
+				   SET_VAL(USERINFO, tx_ring->tail));
+	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+	pdata->tx_level[tx_ring->cp_ring->index] += count;
+	tx_ring->tail = tail;
+
+	return count;
+}
+
+static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
+					 struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_desc_ring *tx_ring;
+	int index = skb->queue_mapping;
+	u32 tx_level = pdata->tx_level[index];
+	int count;
+
+	tx_ring = pdata->tx_ring[index];
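+	/* tx_level and txc_level are free-running counters; re-bias
+	 * tx_level on wraparound so the subtraction below yields the
+	 * number of descriptors still in flight.
+	 */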
+	if (tx_level < pdata->txc_level[index])
+		tx_level += ((typeof(pdata->tx_level[index]))~0U);
+
+	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
+		netif_stop_subqueue(ndev, index);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
+		return NETDEV_TX_OK;
+
+	count = xgene_enet_setup_tx_desc(tx_ring, skb);
+	if (count == -EBUSY)
+		return NETDEV_TX_BUSY;
+
+	if (count <= 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	skb_tx_timestamp(skb);
+
+	tx_ring->tx_packets++;
+	tx_ring->tx_bytes += skb->len;
+
+	pdata->ring_ops->wr_cmd(tx_ring, count);
+	return NETDEV_TX_OK;
+}
+
+static void xgene_enet_rx_csum(struct sk_buff *skb)
+{
+	struct net_device *ndev = skb->dev;
+	struct iphdr *iph = ip_hdr(skb);
+
+	if (!(ndev->features & NETIF_F_RXCSUM))
+		return;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return;
+
+	if (ip_is_fragment(iph))
+		return;
+
+	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+		return;
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
+				     struct xgene_enet_raw_desc *raw_desc,
+				     struct xgene_enet_raw_desc *exp_desc)
+{
+	__le64 *desc = (void *)exp_desc;
+	dma_addr_t dma_addr;
+	struct device *dev;
+	struct page *page;
+	u16 slots, head;
+	u32 frag_size;
+	int i;
+
+	if (!buf_pool || !raw_desc || !exp_desc ||
+	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
+		return;
+
+	dev = ndev_to_dev(buf_pool->ndev);
+	slots = buf_pool->slots - 1;
+	head = buf_pool->head;
+
+	for (i = 0; i < 4; i++) {
+		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+		if (!frag_size)
+			break;
+
+		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+		page = buf_pool->frag_page[head];
+		put_page(page);
+
+		buf_pool->frag_page[head] = NULL;
+		head = (head + 1) & slots;
+	}
+	buf_pool->head = head;
+}
+
+/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
+static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
+{
+	if (status == INGRESS_CRC &&
+	    len >= (ETHER_STD_PACKET + 1) &&
+	    len <= (ETHER_STD_PACKET + 4) &&
+	    skb->protocol == htons(ETH_P_8021Q))
+		return true;
+
+	return false;
+}
+
+/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
+static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
+{
+	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
+		if (ntohs(eth_hdr(skb)->h_proto) < 46)
+			return true;
+	}
+
+	return false;
+}
+
+static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
+			       struct xgene_enet_raw_desc *raw_desc,
+			       struct xgene_enet_raw_desc *exp_desc)
+{
+	struct xgene_enet_desc_ring *buf_pool, *page_pool;
+	u32 datalen, frag_size, skb_index;
+	struct xgene_enet_pdata *pdata;
+	struct net_device *ndev;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	struct device *dev;
+	struct page *page;
+	u16 slots, head;
+	int i, ret = 0;
+	__le64 *desc;
+	u8 status;
+	bool nv;
+
+	ndev = rx_ring->ndev;
+	pdata = netdev_priv(ndev);
+	dev = ndev_to_dev(rx_ring->ndev);
+	buf_pool = rx_ring->buf_pool;
+	page_pool = rx_ring->page_pool;
+
+	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
+			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
+	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
+	skb = buf_pool->rx_skb[skb_index];
+	buf_pool->rx_skb[skb_index] = NULL;
+
+	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+	skb_put(skb, datalen);
+	prefetch(skb->data - NET_IP_ALIGN);
+	skb->protocol = eth_type_trans(skb, ndev);
+
+	/* checking for error */
+	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
+		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+	if (unlikely(status)) {
+		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
+			pdata->false_rflr++;
+		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
+			pdata->vlan_rjbr++;
+		} else {
+			dev_kfree_skb_any(skb);
+			xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
+			xgene_enet_parse_error(rx_ring, status);
+			rx_ring->rx_dropped++;
+			goto out;
+		}
+	}
+
+	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+	if (!nv) {
+		/* strip off CRC as HW isn't doing this */
+		datalen -= 4;
+		goto skip_jumbo;
+	}
+
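+	/* The frame continues into page-pool fragments described by the
+	 * expanded descriptor; attach them to the skb.
+	 */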
+	slots = page_pool->slots - 1;
+	head = page_pool->head;
+	desc = (void *)exp_desc;
+
+	for (i = 0; i < 4; i++) {
+		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+		if (!frag_size)
+			break;
+
+		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+		page = page_pool->frag_page[head];
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
+				frag_size, PAGE_SIZE);
+
+		datalen += frag_size;
+
+		page_pool->frag_page[head] = NULL;
+		head = (head + 1) & slots;
+	}
+
+	page_pool->head = head;
+	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
+
+skip_jumbo:
+	skb_checksum_none_assert(skb);
+	xgene_enet_rx_csum(skb);
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += datalen;
+	napi_gro_receive(&rx_ring->napi, skb);
+
+out:
+	if (rx_ring->npagepool <= 0) {
+		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
+		rx_ring->npagepool = NUM_NXTBUFPOOL;
+		if (ret)
+			return ret;
+	}
+
+	if (--rx_ring->nbufpool == 0) {
+		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
+		rx_ring->nbufpool = NUM_BUFPOOL;
+	}
+
+	return ret;
+}
+
+static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
+{
+	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
+}
+
+static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
+				   int budget)
+{
+	struct net_device *ndev = ring->ndev;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
+	u16 head = ring->head;
+	u16 slots = ring->slots - 1;
+	int ret, desc_count, count = 0, processed = 0;
+	bool is_completion;
+
+	do {
+		raw_desc = &ring->raw_desc[head];
+		desc_count = 0;
+		is_completion = false;
+		exp_desc = NULL;
+		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
+			break;
+
+		/* read fpqnum field after dataaddr field */
+		dma_rmb();
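+		/* NV (next-valid) means this message occupies two ring
+		 * slots; the second slot carries the expanded descriptor.
+		 */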
+		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
+			head = (head + 1) & slots;
+			exp_desc = &ring->raw_desc[head];
+
+			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
+				head = (head - 1) & slots;
+				break;
+			}
+			dma_rmb();
+			count++;
+			desc_count++;
+		}
+		if (is_rx_desc(raw_desc)) {
+			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
+		} else {
+			ret = xgene_enet_tx_completion(ring, raw_desc);
+			is_completion = true;
+		}
+		xgene_enet_mark_desc_slot_empty(raw_desc);
+		if (exp_desc)
+			xgene_enet_mark_desc_slot_empty(exp_desc);
+
+		head = (head + 1) & slots;
+		count++;
+		desc_count++;
+		processed++;
+		if (is_completion)
+			pdata->txc_level[ring->index] += desc_count;
+
+		if (ret)
+			break;
+	} while (--budget);
+
+	if (likely(count)) {
+		pdata->ring_ops->wr_cmd(ring, -count);
+		ring->head = head;
+
+		if (__netif_subqueue_stopped(ndev, ring->index))
+			netif_start_subqueue(ndev, ring->index);
+	}
+
+	return processed;
+}
+
+static int xgene_enet_napi(struct napi_struct *napi, const int budget)
+{
+	struct xgene_enet_desc_ring *ring;
+	int processed;
+
+	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
+	processed = xgene_enet_process_ring(ring, budget);
+
+	if (processed != budget) {
+		napi_complete_done(napi, processed);
+		enable_irq(ring->irq);
+	}
+
+	return processed;
+}
+
+static void xgene_enet_timeout(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct netdev_queue *txq;
+	int i;
+
+	pdata->mac_ops->reset(pdata);
+
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		txq = netdev_get_tx_queue(ndev, i);
+		txq->trans_start = jiffies;
+		netif_tx_start_queue(txq);
+	}
+}
+
+static void xgene_enet_set_irq_name(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_desc_ring *ring;
+	int i;
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (!pdata->cq_cnt) {
+			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+				 ndev->name);
+		} else {
+			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
+				 ndev->name, i);
+		}
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		ring = pdata->tx_ring[i]->cp_ring;
+		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
+			 ndev->name, i);
+	}
+}
+
+static int xgene_enet_register_irq(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = ndev_to_dev(ndev);
+	struct xgene_enet_desc_ring *ring;
+	int ret = 0, i;
+
+	xgene_enet_set_irq_name(ndev);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
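+		/* The handler masks the line with disable_irq_nosync();
+		 * IRQ_DISABLE_UNLAZY makes the disable take effect at the
+		 * interrupt controller immediately.
+		 */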
+		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+				       0, ring->irq_name, ring);
+		if (ret) {
+			netdev_err(ndev, "Failed to request irq %s\n",
+				   ring->irq_name);
+		}
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		ring = pdata->tx_ring[i]->cp_ring;
+		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+				       0, ring->irq_name, ring);
+		if (ret) {
+			netdev_err(ndev, "Failed to request irq %s\n",
+				   ring->irq_name);
+		}
+	}
+
+	return ret;
+}
+
+static void xgene_enet_free_irq(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata;
+	struct xgene_enet_desc_ring *ring;
+	struct device *dev;
+	int i;
+
+	pdata = netdev_priv(ndev);
+	dev = ndev_to_dev(ndev);
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+		devm_free_irq(dev, ring->irq, ring);
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		ring = pdata->tx_ring[i]->cp_ring;
+		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+		devm_free_irq(dev, ring->irq, ring);
+	}
+}
+
+static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+	int i;
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		napi_enable(napi);
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
+		napi_enable(napi);
+	}
+}
+
+static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+	int i;
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		napi_disable(napi);
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
+		napi_disable(napi);
+	}
+}
+
+static int xgene_enet_open(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
+	int ret;
+
+	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
+	if (ret)
+		return ret;
+
+	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
+	if (ret)
+		return ret;
+
+	xgene_enet_napi_enable(pdata);
+	ret = xgene_enet_register_irq(ndev);
+	if (ret)
+		return ret;
+
+	if (ndev->phydev) {
+		phy_start(ndev->phydev);
+	} else {
+		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
+		netif_carrier_off(ndev);
+	}
+
+	mac_ops->tx_enable(pdata);
+	mac_ops->rx_enable(pdata);
+	netif_tx_start_all_queues(ndev);
+
+	return ret;
+}
+
+static int xgene_enet_close(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
+	int i;
+
+	netif_tx_stop_all_queues(ndev);
+	mac_ops->tx_disable(pdata);
+	mac_ops->rx_disable(pdata);
+
+	if (ndev->phydev)
+		phy_stop(ndev->phydev);
+	else
+		cancel_delayed_work_sync(&pdata->link_work);
+
+	xgene_enet_free_irq(ndev);
+	xgene_enet_napi_disable(pdata);
+	for (i = 0; i < pdata->rxq_cnt; i++)
+		xgene_enet_process_ring(pdata->rx_ring[i], -1);
+
+	return 0;
+}
+
+static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata;
+	struct device *dev;
+
+	pdata = netdev_priv(ring->ndev);
+	dev = ndev_to_dev(ring->ndev);
+
+	pdata->ring_ops->clear(ring);
+	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+}
+
+static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
+{
+	struct xgene_enet_desc_ring *buf_pool, *page_pool;
+	struct xgene_enet_desc_ring *ring;
+	int i;
+
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			xgene_enet_delete_ring(ring);
+			pdata->port_ops->clear(pdata, ring);
+			if (pdata->cq_cnt)
+				xgene_enet_delete_ring(ring->cp_ring);
+			pdata->tx_ring[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			page_pool = ring->page_pool;
+			if (page_pool) {
+				xgene_enet_delete_pagepool(page_pool);
+				xgene_enet_delete_ring(page_pool);
+				pdata->port_ops->clear(pdata, page_pool);
+			}
+
+			buf_pool = ring->buf_pool;
+			xgene_enet_delete_bufpool(buf_pool);
+			xgene_enet_delete_ring(buf_pool);
+			pdata->port_ops->clear(pdata, buf_pool);
+
+			xgene_enet_delete_ring(ring);
+			pdata->rx_ring[i] = NULL;
+		}
+	}
+}
+
+static int xgene_enet_get_ring_size(struct device *dev,
+				    enum xgene_enet_ring_cfgsize cfgsize)
+{
+	int size = -EINVAL;
+
+	switch (cfgsize) {
+	case RING_CFGSIZE_512B:
+		size = 0x200;
+		break;
+	case RING_CFGSIZE_2KB:
+		size = 0x800;
+		break;
+	case RING_CFGSIZE_16KB:
+		size = 0x4000;
+		break;
+	case RING_CFGSIZE_64KB:
+		size = 0x10000;
+		break;
+	case RING_CFGSIZE_512KB:
+		size = 0x80000;
+		break;
+	default:
+		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
+		break;
+	}
+
+	return size;
+}
+
+static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata;
+	struct device *dev;
+
+	if (!ring)
+		return;
+
+	dev = ndev_to_dev(ring->ndev);
+	pdata = netdev_priv(ring->ndev);
+
+	if (ring->desc_addr) {
+		pdata->ring_ops->clear(ring);
+		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+	}
+	devm_kfree(dev, ring);
+}
+
+static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
+{
+	struct xgene_enet_desc_ring *page_pool;
+	struct device *dev = &pdata->pdev->dev;
+	struct xgene_enet_desc_ring *ring;
+	void *p;
+	int i;
+
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			if (ring->cp_ring && ring->cp_ring->cp_skb)
+				devm_kfree(dev, ring->cp_ring->cp_skb);
+
+			if (ring->cp_ring && pdata->cq_cnt)
+				xgene_enet_free_desc_ring(ring->cp_ring);
+
+			xgene_enet_free_desc_ring(ring);
+		}
+	}
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			if (ring->buf_pool) {
+				if (ring->buf_pool->rx_skb)
+					devm_kfree(dev, ring->buf_pool->rx_skb);
+
+				xgene_enet_free_desc_ring(ring->buf_pool);
+			}
+
+			page_pool = ring->page_pool;
+			if (page_pool) {
+				p = page_pool->frag_page;
+				if (p)
+					devm_kfree(dev, p);
+
+				p = page_pool->frag_dma_addr;
+				if (p)
+					devm_kfree(dev, p);
+			}
+
+			xgene_enet_free_desc_ring(ring);
+		}
+	}
+}
+
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+				 struct xgene_enet_desc_ring *ring)
+{
+	if ((pdata->enet_id == XGENE_ENET2) &&
+	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+		return true;
+	}
+
+	return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+					      struct xgene_enet_desc_ring *ring)
+{
+	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
+			struct net_device *ndev, u32 ring_num,
+			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = ndev_to_dev(ndev);
+	struct xgene_enet_desc_ring *ring;
+	void *irq_mbox_addr;
+	int size;
+
+	size = xgene_enet_get_ring_size(dev, cfgsize);
+	if (size < 0)
+		return NULL;
+
+	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
+			    GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	ring->ndev = ndev;
+	ring->num = ring_num;
+	ring->cfgsize = cfgsize;
+	ring->id = ring_id;
+
+	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
+					      GFP_KERNEL | __GFP_ZERO);
+	if (!ring->desc_addr) {
+		devm_kfree(dev, ring);
+		return NULL;
+	}
+	ring->size = size;
+
+	if (is_irq_mbox_required(pdata, ring)) {
+		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
+						    &ring->irq_mbox_dma,
+						    GFP_KERNEL | __GFP_ZERO);
+		if (!irq_mbox_addr) {
+			dmam_free_coherent(dev, size, ring->desc_addr,
+					   ring->dma);
+			devm_kfree(dev, ring);
+			return NULL;
+		}
+		ring->irq_mbox_addr = irq_mbox_addr;
+	}
+
+	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
+	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
+	ring = pdata->ring_ops->setup(ring);
+	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
+		   ring->num, ring->size, ring->id, ring->slots);
+
+	return ring;
+}
+
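+/* A ring id packs the owner into bits [9:6] and the buffer number into
+ * bits [5:0], matching RING_OWNER_MASK and RING_BUFNUM_MASK.
+ */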
+static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
+{
+	return (owner << 6) | (bufnum & GENMASK(5, 0));
+}
+
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+	enum xgene_ring_owner owner;
+
+	if (p->enet_id == XGENE_ENET1) {
+		switch (p->phy_mode) {
+		case PHY_INTERFACE_MODE_SGMII:
+			owner = RING_OWNER_ETH0;
+			break;
+		default:
+			owner = (!p->port_id) ? RING_OWNER_ETH0 :
+						RING_OWNER_ETH1;
+			break;
+		}
+	} else {
+		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+	}
+
+	return owner;
+}
+
+static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	u32 cpu_bufnum;
+	int ret;
+
+	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
+
+	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
+}
+
+static int xgene_enet_create_desc_rings(struct net_device *ndev)
+{
+	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_desc_ring *page_pool = NULL;
+	struct xgene_enet_desc_ring *buf_pool = NULL;
+	struct device *dev = ndev_to_dev(ndev);
+	u8 eth_bufnum = pdata->eth_bufnum;
+	u8 bp_bufnum = pdata->bp_bufnum;
+	u16 ring_num = pdata->ring_num;
+	enum xgene_ring_owner owner;
+	dma_addr_t dma_exp_bufs;
+	u16 ring_id, slots;
+	__le64 *exp_bufs;
+	int i, ret, size;
+	u8 cpu_bufnum;
+
+	cpu_bufnum = xgene_start_cpu_bufnum(pdata);
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		/* allocate rx descriptor ring */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+						      RING_CFGSIZE_16KB,
+						      ring_id);
+		if (!rx_ring) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/* allocate buffer pool for receiving packets */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+						       RING_CFGSIZE_16KB,
+						       ring_id);
+		if (!buf_pool) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		rx_ring->nbufpool = NUM_BUFPOOL;
+		rx_ring->npagepool = NUM_NXTBUFPOOL;
+		rx_ring->irq = pdata->irqs[i];
+		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
+						sizeof(struct sk_buff *),
+						GFP_KERNEL);
+		if (!buf_pool->rx_skb) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
+		rx_ring->buf_pool = buf_pool;
+		pdata->rx_ring[i] = rx_ring;
+
+		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
+		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
+			break;
+		}
+
+		/* allocate next buffer pool for jumbo packets */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+							RING_CFGSIZE_16KB,
+							ring_id);
+		if (!page_pool) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		slots = page_pool->slots;
+		page_pool->frag_page = devm_kcalloc(dev, slots,
+						    sizeof(struct page *),
+						    GFP_KERNEL);
+		if (!page_pool->frag_page) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
+							sizeof(dma_addr_t),
+							GFP_KERNEL);
+		if (!page_pool->frag_dma_addr) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
+		rx_ring->page_pool = page_pool;
+	}
+
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		/* allocate tx descriptor ring */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
+		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+						      RING_CFGSIZE_16KB,
+						      ring_id);
+		if (!tx_ring) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
+					       GFP_KERNEL | __GFP_ZERO);
+		if (!exp_bufs) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		tx_ring->exp_bufs = exp_bufs;
+
+		pdata->tx_ring[i] = tx_ring;
+
+		if (!pdata->cq_cnt) {
+			cp_ring = pdata->rx_ring[i];
+		} else {
+			/* allocate tx completion descriptor ring */
+			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
+							 cpu_bufnum++);
+			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+							      RING_CFGSIZE_16KB,
+							      ring_id);
+			if (!cp_ring) {
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
+			cp_ring->index = i;
+		}
+
+		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
+					       sizeof(struct sk_buff *),
+					       GFP_KERNEL);
+		if (!cp_ring->cp_skb) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+						      size, GFP_KERNEL);
+		if (!cp_ring->frag_dma_addr) {
+			devm_kfree(dev, cp_ring->cp_skb);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		tx_ring->cp_ring = cp_ring;
+		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
+	}
+
+	if (pdata->ring_ops->coalesce)
+		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
+
+	return 0;
+
+err:
+	xgene_enet_free_desc_rings(pdata);
+	return ret;
+}
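+
+/*
+ * Editor's note, illustrative only and not part of the patch: each rx
+ * queue above gets a CPU-owned work ring plus an ETH-owned buffer pool
+ * (and a page pool for jumbo frames where one could be created), and each
+ * tx queue gets a descriptor ring plus a completion ring; when cq_cnt is
+ * zero the rx ring doubles as the tx completion ring.  tx_qcnt_hi keeps
+ * 128 slots of headroom so the tx queue can be stopped before the ring
+ * fills up.
+ */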
+
+static void xgene_enet_get_stats64(struct net_device *ndev,
+				   struct rtnl_link_stats64 *stats)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_desc_ring *ring;
+	int i;
+
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			stats->tx_packets += ring->tx_packets;
+			stats->tx_bytes += ring->tx_bytes;
+			stats->tx_dropped += ring->tx_dropped;
+			stats->tx_errors += ring->tx_errors;
+		}
+	}
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			stats->rx_packets += ring->rx_packets;
+			stats->rx_bytes += ring->rx_bytes;
+			stats->rx_dropped += ring->rx_dropped;
+			stats->rx_errors += ring->rx_errors +
+				ring->rx_length_errors +
+				ring->rx_crc_errors +
+				ring->rx_frame_errors +
+				ring->rx_fifo_errors;
+			stats->rx_length_errors += ring->rx_length_errors;
+			stats->rx_crc_errors += ring->rx_crc_errors;
+			stats->rx_frame_errors += ring->rx_frame_errors;
+			stats->rx_fifo_errors += ring->rx_fifo_errors;
+		}
+	}
+}
+
+static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	ret = eth_mac_addr(ndev, addr);
+	if (ret)
+		return ret;
+	pdata->mac_ops->set_mac_addr(pdata);
+
+	return ret;
+}
+
+static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	int frame_size;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
+
+	xgene_enet_close(ndev);
+	ndev->mtu = new_mtu;
+	pdata->mac_ops->set_framesize(pdata, frame_size);
+	xgene_enet_open(ndev);
+
+	return 0;
+}
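+
+/*
+ * Editor's note, worked example and not part of the patch: with new_mtu
+ * 9000 the MAC frame size becomes 9000 + 18 = 9018 bytes (payload plus a
+ * 14-byte Ethernet header and 4-byte FCS); for new_mtu <= ETH_DATA_LEN
+ * (1500) the MAC keeps the fixed default of 0x600 = 1536 bytes.
+ */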
+
+static const struct net_device_ops xgene_ndev_ops = {
+	.ndo_open = xgene_enet_open,
+	.ndo_stop = xgene_enet_close,
+	.ndo_start_xmit = xgene_enet_start_xmit,
+	.ndo_tx_timeout = xgene_enet_timeout,
+	.ndo_get_stats64 = xgene_enet_get_stats64,
+	.ndo_change_mtu = xgene_change_mtu,
+	.ndo_set_mac_address = xgene_enet_set_mac_address,
+};
+
+#ifdef CONFIG_ACPI
+static void xgene_get_port_id_acpi(struct device *dev,
+				  struct xgene_enet_pdata *pdata)
+{
+	acpi_status status;
+	u64 temp;
+
+	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
+	if (ACPI_FAILURE(status))
+		pdata->port_id = 0;
+	else
+		pdata->port_id = temp;
+}
+#endif
+
+static void xgene_get_port_id_dt(struct device *dev,
+				 struct xgene_enet_pdata *pdata)
+{
+	u32 id = 0;
+
+	of_property_read_u32(dev->of_node, "port-id", &id);
+
+	pdata->port_id = id & BIT(0);
+}
+
+static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	int delay, ret;
+
+	ret = device_property_read_u32(dev, "tx-delay", &delay);
+	if (ret) {
+		pdata->tx_delay = 4;
+		return 0;
+	}
+
+	if (delay < 0 || delay > 7) {
+		dev_err(dev, "Invalid tx-delay specified\n");
+		return -EINVAL;
+	}
+
+	pdata->tx_delay = delay;
+
+	return 0;
+}
+
+static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	int delay, ret;
+
+	ret = device_property_read_u32(dev, "rx-delay", &delay);
+	if (ret) {
+		pdata->rx_delay = 2;
+		return 0;
+	}
+
+	if (delay < 0 || delay > 7) {
+		dev_err(dev, "Invalid rx-delay specified\n");
+		return -EINVAL;
+	}
+
+	pdata->rx_delay = delay;
+
+	return 0;
+}
+
+static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
+{
+	struct platform_device *pdev = pdata->pdev;
+	struct device *dev = &pdev->dev;
+	int i, ret, max_irqs;
+
+	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
+		max_irqs = 1;
+	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
+		max_irqs = 2;
+	else
+		max_irqs = XGENE_MAX_ENET_IRQ;
+
+	for (i = 0; i < max_irqs; i++) {
+		ret = platform_get_irq(pdev, i);
+		if (ret <= 0) {
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				max_irqs = i;
+				pdata->rxq_cnt = max_irqs / 2;
+				pdata->txq_cnt = max_irqs / 2;
+				pdata->cq_cnt = max_irqs / 2;
+				break;
+			}
+			dev_err(dev, "Unable to get ENET IRQ\n");
+			ret = ret ? : -ENXIO;
+			return ret;
+		}
+		pdata->irqs[i] = ret;
+	}
+
+	return 0;
+}
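+
+/*
+ * Editor's note, worked example and not part of the patch: in XGMII mode
+ * up to XGENE_MAX_ENET_IRQ (16) interrupts are requested and split evenly
+ * between rx rings and tx completion rings.  If only 8 IRQs are wired up,
+ * platform_get_irq() fails at i == 8 and the driver falls back to
+ * rxq_cnt = txq_cnt = cq_cnt = 8 / 2 = 4.
+ */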
+
+static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+{
+	int ret;
+
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
+		return;
+
+	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
+		return;
+
+	ret = xgene_enet_phy_connect(pdata->ndev);
+	if (!ret)
+		pdata->mdio_driver = true;
+}
+
+static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	pdata->sfp_gpio_en = false;
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
+	    (!device_property_present(dev, "sfp-gpios") &&
+	     !device_property_present(dev, "rxlos-gpios")))
+		return;
+
+	pdata->sfp_gpio_en = true;
+	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
+	if (IS_ERR(pdata->sfp_rdy))
+		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
+}
+
+static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
+{
+	struct platform_device *pdev;
+	struct net_device *ndev;
+	struct device *dev;
+	struct resource *res;
+	void __iomem *base_addr;
+	u32 offset;
+	int ret = 0;
+
+	pdev = pdata->pdev;
+	dev = &pdev->dev;
+	ndev = pdata->ndev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
+	if (!res) {
+		dev_err(dev, "Resource enet_csr not defined\n");
+		return -ENODEV;
+	}
+	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pdata->base_addr) {
+		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
+	if (!res) {
+		dev_err(dev, "Resource ring_csr not defined\n");
+		return -ENODEV;
+	}
+	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
+					    resource_size(res));
+	if (!pdata->ring_csr_addr) {
+		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
+	if (!res) {
+		dev_err(dev, "Resource ring_cmd not defined\n");
+		return -ENODEV;
+	}
+	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
+					    resource_size(res));
+	if (!pdata->ring_cmd_addr) {
+		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
+		return -ENOMEM;
+	}
+
+	if (dev->of_node)
+		xgene_get_port_id_dt(dev, pdata);
+#ifdef CONFIG_ACPI
+	else
+		xgene_get_port_id_acpi(dev, pdata);
+#endif
+
+	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+		eth_hw_addr_random(ndev);
+
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	pdata->phy_mode = device_get_phy_mode(dev);
+	if (pdata->phy_mode < 0) {
+		dev_err(dev, "Unable to get phy-connection-type\n");
+		return pdata->phy_mode;
+	}
+	if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
+	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
+	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+		dev_err(dev, "Incorrect phy-connection-type specified\n");
+		return -ENODEV;
+	}
+
+	ret = xgene_get_tx_delay(pdata);
+	if (ret)
+		return ret;
+
+	ret = xgene_get_rx_delay(pdata);
+	if (ret)
+		return ret;
+
+	ret = xgene_enet_get_irqs(pdata);
+	if (ret)
+		return ret;
+
+	xgene_enet_gpiod_get(pdata);
+
+	pdata->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pdata->clk)) {
+		if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
+			/* Abort if the clock is defined but couldn't be
+			 * retrieved. Always abort if the clock is missing on
+			 * a DT system, as the driver can't cope with this
+			 * case.
+			 */
+			if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
+				return PTR_ERR(pdata->clk);
+			/* Firmware may have set up the clock already. */
+			dev_info(dev, "clocks have been setup already\n");
+		}
+	}
+
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+	else
+		base_addr = pdata->base_addr;
+	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
+	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
+	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
+	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
+	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
+	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
+		pdata->mcx_stats_addr =
+			pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
+		offset = (pdata->enet_id == XGENE_ENET1) ?
+			  BLOCK_ETH_MAC_CSR_OFFSET :
+			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + offset;
+	} else {
+		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+		pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
+		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
+	}
+	pdata->rx_buff_cnt = NUM_PKT_BUF;
+
+	return 0;
+}
+
+static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
+{
+	struct xgene_enet_cle *enet_cle = &pdata->cle;
+	struct xgene_enet_desc_ring *page_pool;
+	struct net_device *ndev = pdata->ndev;
+	struct xgene_enet_desc_ring *buf_pool;
+	u16 dst_ring_num, ring_id;
+	int i, ret;
+	u32 count;
+
+	ret = pdata->port_ops->reset(pdata);
+	if (ret)
+		return ret;
+
+	ret = xgene_enet_create_desc_rings(ndev);
+	if (ret) {
+		netdev_err(ndev, "Error in ring configuration\n");
+		return ret;
+	}
+
+	/* setup buffer pool */
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		buf_pool = pdata->rx_ring[i]->buf_pool;
+		xgene_enet_init_bufpool(buf_pool);
+		page_pool = pdata->rx_ring[i]->page_pool;
+		xgene_enet_init_bufpool(page_pool);
+
+		count = pdata->rx_buff_cnt;
+		ret = xgene_enet_refill_bufpool(buf_pool, count);
+		if (ret)
+			goto err;
+
+		ret = xgene_enet_refill_pagepool(page_pool, count);
+		if (ret)
+			goto err;
+	}
+
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+		/* Initialize and enable the preclassifier tree */
+		enet_cle->max_nodes = 512;
+		enet_cle->max_dbptrs = 1024;
+		enet_cle->parsers = 3;
+		enet_cle->active_parser = PARSER_ALL;
+		enet_cle->ptree.start_node = 0;
+		enet_cle->ptree.start_dbptr = 0;
+		enet_cle->jump_bytes = 8;
+		ret = pdata->cle_ops->cle_init(pdata);
+		if (ret) {
+			netdev_err(ndev, "Preclass Tree init error\n");
+			goto err;
+		}
+	} else {
+		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+		buf_pool = pdata->rx_ring[0]->buf_pool;
+		page_pool = pdata->rx_ring[0]->page_pool;
+		ring_id = (page_pool) ? page_pool->id : 0;
+		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
+					    buf_pool->id, ring_id);
+	}
+
+	ndev->max_mtu = XGENE_ENET_MAX_MTU;
+	pdata->phy_speed = SPEED_UNKNOWN;
+	pdata->mac_ops->init(pdata);
+
+	return ret;
+
+err:
+	xgene_enet_delete_desc_rings(pdata);
+	return ret;
+}
+
+static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
+{
+	switch (pdata->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		pdata->mac_ops = &xgene_gmac_ops;
+		pdata->port_ops = &xgene_gport_ops;
+		pdata->rm = RM3;
+		pdata->rxq_cnt = 1;
+		pdata->txq_cnt = 1;
+		pdata->cq_cnt = 0;
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		pdata->mac_ops = &xgene_sgmac_ops;
+		pdata->port_ops = &xgene_sgport_ops;
+		pdata->rm = RM1;
+		pdata->rxq_cnt = 1;
+		pdata->txq_cnt = 1;
+		pdata->cq_cnt = 1;
+		break;
+	default:
+		pdata->mac_ops = &xgene_xgmac_ops;
+		pdata->port_ops = &xgene_xgport_ops;
+		pdata->cle_ops = &xgene_cle3in_ops;
+		pdata->rm = RM0;
+		if (!pdata->rxq_cnt) {
+			pdata->rxq_cnt = XGENE_NUM_RX_RING;
+			pdata->txq_cnt = XGENE_NUM_TX_RING;
+			pdata->cq_cnt = XGENE_NUM_TXC_RING;
+		}
+		break;
+	}
+
+	if (pdata->enet_id == XGENE_ENET1) {
+		switch (pdata->port_id) {
+		case 0:
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+				pdata->ring_num = START_RING_NUM_0;
+			} else {
+				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+				pdata->eth_bufnum = START_ETH_BUFNUM_0;
+				pdata->bp_bufnum = START_BP_BUFNUM_0;
+				pdata->ring_num = START_RING_NUM_0;
+			}
+			break;
+		case 1:
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
+				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
+				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
+				pdata->ring_num = XG_START_RING_NUM_1;
+			} else {
+				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+				pdata->eth_bufnum = START_ETH_BUFNUM_1;
+				pdata->bp_bufnum = START_BP_BUFNUM_1;
+				pdata->ring_num = START_RING_NUM_1;
+			}
+			break;
+		default:
+			break;
+		}
+		pdata->ring_ops = &xgene_ring1_ops;
+	} else {
+		switch (pdata->port_id) {
+		case 0:
+			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+			pdata->ring_num = X2_START_RING_NUM_0;
+			break;
+		case 1:
+			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+			pdata->ring_num = X2_START_RING_NUM_1;
+			break;
+		default:
+			break;
+		}
+		pdata->rm = RM0;
+		pdata->ring_ops = &xgene_ring2_ops;
+	}
+}
+
+static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+	int i;
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+			       NAPI_POLL_WEIGHT);
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
+		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+			       NAPI_POLL_WEIGHT);
+	}
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+	{ "APMC0D05", XGENE_ENET1},
+	{ "APMC0D30", XGENE_ENET1},
+	{ "APMC0D31", XGENE_ENET1},
+	{ "APMC0D3F", XGENE_ENET1},
+	{ "APMC0D26", XGENE_ENET2},
+	{ "APMC0D25", XGENE_ENET2},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static const struct of_device_id xgene_enet_of_match[] = {
+	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+
+static int xgene_enet_probe(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+	struct xgene_enet_pdata *pdata;
+	struct device *dev = &pdev->dev;
+	void (*link_state)(struct work_struct *);
+	const struct of_device_id *of_id;
+	int ret;
+
+	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
+	if (!ndev)
+		return -ENOMEM;
+
+	pdata = netdev_priv(ndev);
+
+	pdata->pdev = pdev;
+	pdata->ndev = ndev;
+	SET_NETDEV_DEV(ndev, dev);
+	platform_set_drvdata(pdev, pdata);
+	ndev->netdev_ops = &xgene_ndev_ops;
+	xgene_enet_set_ethtool_ops(ndev);
+	ndev->features |= NETIF_F_IP_CSUM |
+			  NETIF_F_GSO |
+			  NETIF_F_GRO |
+			  NETIF_F_SG;
+
+	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+	if (of_id) {
+		pdata->enet_id = (enum xgene_enet_id)of_id->data;
+	}
+#ifdef CONFIG_ACPI
+	else {
+		const struct acpi_device_id *acpi_id;
+
+		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
+		if (acpi_id)
+			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
+	}
+#endif
+	if (!pdata->enet_id) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	ret = xgene_enet_get_resources(pdata);
+	if (ret)
+		goto err;
+
+	xgene_enet_setup_ops(pdata);
+	spin_lock_init(&pdata->mac_lock);
+
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
+		spin_lock_init(&pdata->mss_lock);
+	}
+	ndev->hw_features = ndev->features;
+
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret) {
+		netdev_err(ndev, "No usable DMA configuration\n");
+		goto err;
+	}
+
+	xgene_enet_check_phy_handle(pdata);
+
+	ret = xgene_enet_init_hw(pdata);
+	if (ret)
+		goto err2;
+
+	link_state = pdata->mac_ops->link_state;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+		INIT_DELAYED_WORK(&pdata->link_work, link_state);
+	} else if (!pdata->mdio_driver) {
+		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
+			ret = xgene_enet_mdio_config(pdata);
+		else
+			INIT_DELAYED_WORK(&pdata->link_work, link_state);
+
+		if (ret)
+			goto err1;
+	}
+
+	spin_lock_init(&pdata->stats_lock);
+	ret = xgene_extd_stats_init(pdata);
+	if (ret)
+		goto err1;
+
+	xgene_enet_napi_add(pdata);
+	ret = register_netdev(ndev);
+	if (ret) {
+		netdev_err(ndev, "Failed to register netdev\n");
+		goto err1;
+	}
+
+	return 0;
+
+err1:
+	/*
+	 * If necessary, free_netdev() will call netif_napi_del() and undo
+	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
+	 */
+
+	xgene_enet_delete_desc_rings(pdata);
+
+err2:
+	if (pdata->mdio_driver)
+		xgene_enet_phy_disconnect(pdata);
+	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
+		xgene_enet_mdio_remove(pdata);
+err:
+	free_netdev(ndev);
+	return ret;
+}
+
+static int xgene_enet_remove(struct platform_device *pdev)
+{
+	struct xgene_enet_pdata *pdata;
+	struct net_device *ndev;
+
+	pdata = platform_get_drvdata(pdev);
+	ndev = pdata->ndev;
+
+	rtnl_lock();
+	if (netif_running(ndev))
+		dev_close(ndev);
+	rtnl_unlock();
+
+	if (pdata->mdio_driver)
+		xgene_enet_phy_disconnect(pdata);
+	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
+		xgene_enet_mdio_remove(pdata);
+
+	unregister_netdev(ndev);
+	xgene_enet_delete_desc_rings(pdata);
+	pdata->port_ops->shutdown(pdata);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static void xgene_enet_shutdown(struct platform_device *pdev)
+{
+	struct xgene_enet_pdata *pdata;
+
+	pdata = platform_get_drvdata(pdev);
+	if (!pdata)
+		return;
+
+	if (!pdata->ndev)
+		return;
+
+	xgene_enet_remove(pdev);
+}
+
+static struct platform_driver xgene_enet_driver = {
+	.driver = {
+		   .name = "xgene-enet",
+		   .of_match_table = of_match_ptr(xgene_enet_of_match),
+		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
+	},
+	.probe = xgene_enet_probe,
+	.remove = xgene_enet_remove,
+	.shutdown = xgene_enet_shutdown,
+};
+
+module_platform_driver(xgene_enet_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
+MODULE_VERSION(XGENE_DRV_VERSION);
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
new file mode 100644
index 0000000..9857685
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -0,0 +1,277 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_MAIN_H__
+#define __XGENE_ENET_MAIN_H__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <linux/prefetch.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include "xgene_enet_hw.h"
+#include "xgene_enet_cle.h"
+#include "xgene_enet_ring2.h"
+#include "../../../phy/mdio-xgene.h"
+
+#define XGENE_DRV_VERSION	"v1.0"
+#define ETHER_MIN_PACKET	64
+#define ETHER_STD_PACKET	1518
+#define XGENE_ENET_STD_MTU	1536
+#define XGENE_ENET_MAX_MTU	9600
+#define SKB_BUFFER_SIZE		(XGENE_ENET_STD_MTU - NET_IP_ALIGN)
+
+#define BUFLEN_16K	(16 * 1024)
+#define NUM_PKT_BUF	1024
+#define NUM_BUFPOOL	32
+#define NUM_NXTBUFPOOL	8
+#define MAX_EXP_BUFFS	256
+#define NUM_MSS_REG	4
+#define XGENE_MIN_ENET_FRAME_SIZE	60
+
+#define XGENE_MAX_ENET_IRQ	16
+#define XGENE_NUM_RX_RING	8
+#define XGENE_NUM_TX_RING	8
+#define XGENE_NUM_TXC_RING	8
+
+#define START_CPU_BUFNUM_0	0
+#define START_ETH_BUFNUM_0	2
+#define START_BP_BUFNUM_0	0x22
+#define START_RING_NUM_0	8
+#define START_CPU_BUFNUM_1	12
+#define START_ETH_BUFNUM_1	10
+#define START_BP_BUFNUM_1	0x2A
+#define START_RING_NUM_1	264
+
+#define XG_START_CPU_BUFNUM_1	12
+#define XG_START_ETH_BUFNUM_1	2
+#define XG_START_BP_BUFNUM_1	0x22
+#define XG_START_RING_NUM_1	264
+
+#define X2_START_CPU_BUFNUM_0	0
+#define X2_START_ETH_BUFNUM_0	0
+#define X2_START_BP_BUFNUM_0	0x20
+#define X2_START_RING_NUM_0	0
+#define X2_START_CPU_BUFNUM_1	0xc
+#define X2_START_ETH_BUFNUM_1	0
+#define X2_START_BP_BUFNUM_1	0x20
+#define X2_START_RING_NUM_1	256
+
+#define IRQ_ID_SIZE		16
+
+#define PHY_POLL_LINK_ON	(10 * HZ)
+#define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
+
+enum xgene_enet_id {
+	XGENE_ENET1 = 1,
+	XGENE_ENET2
+};
+
+enum xgene_enet_buf_len {
+	SIZE_2K = 2048,
+	SIZE_4K = 4096,
+	SIZE_16K = 16384
+};
+
+/* software context of a descriptor ring */
+struct xgene_enet_desc_ring {
+	struct net_device *ndev;
+	u16 id;
+	u16 num;
+	u16 head;
+	u16 tail;
+	u16 exp_buf_tail;
+	u16 slots;
+	u16 irq;
+	char irq_name[IRQ_ID_SIZE];
+	u32 size;
+	u32 state[X2_NUM_RING_CONFIG];
+	void __iomem *cmd_base;
+	void __iomem *cmd;
+	dma_addr_t dma;
+	dma_addr_t irq_mbox_dma;
+	void *irq_mbox_addr;
+	u16 dst_ring_num;
+	u16 nbufpool;
+	int npagepool;
+	u8 index;
+	u32 flags;
+	struct sk_buff *(*rx_skb);
+	struct sk_buff *(*cp_skb);
+	dma_addr_t *frag_dma_addr;
+	struct page *(*frag_page);
+	enum xgene_enet_ring_cfgsize cfgsize;
+	struct xgene_enet_desc_ring *cp_ring;
+	struct xgene_enet_desc_ring *buf_pool;
+	struct xgene_enet_desc_ring *page_pool;
+	struct napi_struct napi;
+	union {
+		void *desc_addr;
+		struct xgene_enet_raw_desc *raw_desc;
+		struct xgene_enet_raw_desc16 *raw_desc16;
+	};
+	__le64 *exp_bufs;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tx_dropped;
+	u64 tx_errors;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_dropped;
+	u64 rx_errors;
+	u64 rx_length_errors;
+	u64 rx_crc_errors;
+	u64 rx_frame_errors;
+	u64 rx_fifo_errors;
+};
+
+struct xgene_mac_ops {
+	void (*init)(struct xgene_enet_pdata *pdata);
+	void (*reset)(struct xgene_enet_pdata *pdata);
+	void (*tx_enable)(struct xgene_enet_pdata *pdata);
+	void (*rx_enable)(struct xgene_enet_pdata *pdata);
+	void (*tx_disable)(struct xgene_enet_pdata *pdata);
+	void (*rx_disable)(struct xgene_enet_pdata *pdata);
+	void (*get_drop_cnt)(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx);
+	void (*set_speed)(struct xgene_enet_pdata *pdata);
+	void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+	void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
+	void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
+	void (*link_state)(struct work_struct *work);
+	void (*enable_tx_pause)(struct xgene_enet_pdata *pdata, bool enable);
+	void (*flowctl_rx)(struct xgene_enet_pdata *pdata, bool enable);
+	void (*flowctl_tx)(struct xgene_enet_pdata *pdata, bool enable);
+};
+
+struct xgene_port_ops {
+	int (*reset)(struct xgene_enet_pdata *pdata);
+	void (*clear)(struct xgene_enet_pdata *pdata,
+		      struct xgene_enet_desc_ring *ring);
+	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
+			   u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id);
+	void (*shutdown)(struct xgene_enet_pdata *pdata);
+};
+
+struct xgene_ring_ops {
+	u8 num_ring_config;
+	u8 num_ring_id_shift;
+	struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+	void (*clear)(struct xgene_enet_desc_ring *);
+	void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+	u32 (*len)(struct xgene_enet_desc_ring *);
+	void (*coalesce)(struct xgene_enet_desc_ring *);
+};
+
+struct xgene_cle_ops {
+	int (*cle_init)(struct xgene_enet_pdata *pdata);
+};
+
+/* ethernet private data */
+struct xgene_enet_pdata {
+	struct net_device *ndev;
+	struct mii_bus *mdio_bus;
+	int phy_speed;
+	struct clk *clk;
+	struct platform_device *pdev;
+	enum xgene_enet_id enet_id;
+	struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING];
+	struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING];
+	u16 tx_level[XGENE_NUM_TX_RING];
+	u16 txc_level[XGENE_NUM_TX_RING];
+	char *dev_name;
+	u32 rx_buff_cnt;
+	u32 tx_qcnt_hi;
+	u32 irqs[XGENE_MAX_ENET_IRQ];
+	u8 rxq_cnt;
+	u8 txq_cnt;
+	u8 cq_cnt;
+	void __iomem *eth_csr_addr;
+	void __iomem *eth_ring_if_addr;
+	void __iomem *eth_diag_csr_addr;
+	void __iomem *mcx_mac_addr;
+	void __iomem *mcx_mac_csr_addr;
+	void __iomem *mcx_stats_addr;
+	void __iomem *base_addr;
+	void __iomem *pcs_addr;
+	void __iomem *ring_csr_addr;
+	void __iomem *ring_cmd_addr;
+	int phy_mode;
+	enum xgene_enet_rm rm;
+	struct xgene_enet_cle cle;
+	u64 *extd_stats;
+	u64 false_rflr;
+	u64 vlan_rjbr;
+	spinlock_t stats_lock; /* statistics lock */
+	const struct xgene_mac_ops *mac_ops;
+	spinlock_t mac_lock; /* mac lock */
+	const struct xgene_port_ops *port_ops;
+	struct xgene_ring_ops *ring_ops;
+	const struct xgene_cle_ops *cle_ops;
+	struct delayed_work link_work;
+	u32 port_id;
+	u8 cpu_bufnum;
+	u8 eth_bufnum;
+	u8 bp_bufnum;
+	u16 ring_num;
+	u32 mss[NUM_MSS_REG];
+	u32 mss_refcnt[NUM_MSS_REG];
+	spinlock_t mss_lock;  /* mss lock */
+	u8 tx_delay;
+	u8 rx_delay;
+	bool mdio_driver;
+	struct gpio_desc *sfp_rdy;
+	bool sfp_gpio_en;
+	u32 pause_autoneg;
+	bool tx_pause;
+	bool rx_pause;
+};
+
+struct xgene_indirect_ctl {
+	void __iomem *addr;
+	void __iomem *ctl;
+	void __iomem *cmd;
+	void __iomem *cmd_done;
+};
+
+static inline struct device *ndev_to_dev(struct net_device *ndev)
+{
+	return ndev->dev.parent;
+}
+
+static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	return ((u16)pdata->rm << 10) | ring->num;
+}
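+
+/*
+ * Editor's note, worked example and not part of the patch: the destination
+ * ring number carries the ring manager in the bits above the 10-bit ring
+ * number.  Assuming RM3 == 3 (enum xgene_enet_rm lives in xgene_enet_hw.h,
+ * which is not part of this hunk), an RGMII port using ring 8 encodes as
+ * (3 << 10) | 8 = 0xc08.
+ */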
+
+void xgene_enet_set_ethtool_ops(struct net_device *netdev);
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata);
+
+#endif /* __XGENE_ENET_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644
index 0000000..4ff4055
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -0,0 +1,215 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	u64 addr = ring->dma;
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+		ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+	}
+	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
+
+	addr >>= 8;
+	ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+	addr >>= 27;
+	ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+		    | ACCEPTLERR
+		    | SET_VAL(RINGADDRH, addr);
+	ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+	ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
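+
+/*
+ * Editor's note, worked example and not part of the patch: the ring base
+ * address is programmed in two pieces, and the initial >> 8 implies the
+ * ring base must be 256-byte aligned.  For ring->dma == 0x8_0000_0100,
+ * addr >>= 8 leaves 0x08000001 for RINGADDRL and the further addr >>= 27
+ * leaves 0x1 for RINGADDRH (the exact field widths come from the SET_VAL()
+ * definitions in xgene_enet_hw.h, which are not part of this hunk).
+ */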
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	bool is_bufpool;
+	u32 val;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+	ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+	if (is_bufpool)
+		ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+
+	ring_cfg[3] |= RECOMBBUF;
+	ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+				 u32 offset, u32 data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+	int i;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+				     ring->state[i]);
+	}
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	memset(ring->state, 0, sizeof(ring->state));
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	enum xgene_ring_owner owner;
+
+	xgene_enet_ring_set_type(ring);
+
+	owner = xgene_enet_ring_owner(ring->id);
+	if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+		xgene_enet_ring_set_recombbuf(ring);
+
+	xgene_enet_ring_init(ring);
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id_val, ring_id_buf;
+	bool is_bufpool;
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+		return;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+	ring_id_val = ring->id & GENMASK(9, 0);
+	ring_id_val |= OVERWRITE;
+
+	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+	ring_id_buf |= PREFETCH_BUF_EN;
+
+	if (is_bufpool)
+		ring_id_buf |= IS_BUFFER_POOL;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
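+
+/*
+ * Editor's note, worked example and not part of the patch: CSR_RING_ID_BUF
+ * holds the ring number in bits [18:9].  For ring->num == 264 (0x108),
+ * (264 << 9) & GENMASK(18, 9) == 0x21000, after which PREFETCH_BUF_EN (and
+ * IS_BUFFER_POOL for buffer pools) is OR'ed in.
+ */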
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id;
+
+	ring_id = ring->id | OVERWRITE;
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+				    struct xgene_enet_desc_ring *ring)
+{
+	bool is_bufpool;
+	u32 addr, i;
+
+	xgene_enet_clr_ring_state(ring);
+	xgene_enet_set_ring_state(ring);
+	xgene_enet_set_ring_id(ring);
+
+	ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+		return ring;
+
+	addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+	xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+	for (i = 0; i < ring->slots; i++)
+		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+	return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+	xgene_enet_clr_desc_ring_id(ring);
+	xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+	u32 data = 0;
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+		data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+		       INTR_CLEAR;
+	}
+	data |= (count & GENMASK(16, 0));
+
+	iowrite32(data, ring->cmd);
+}
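+
+/*
+ * Editor's note, illustrative only and not part of the patch: the low 17
+ * bits of the command word carry an occupancy delta, and the GENMASK(16, 0)
+ * truncation preserves two's-complement values, so a negative count
+ * (presumably how consumed descriptors are retired elsewhere in the driver)
+ * decrements the queue occupancy.  CPU-owned rings also get their pending
+ * interrupt line cleared via INTR_CLEAR on the same write.
+ */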
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+	u32 __iomem *cmd_base = ring->cmd_base;
+	u32 ring_state, num_msgs;
+
+	ring_state = ioread32(&cmd_base[1]);
+	num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+	return num_msgs;
+}
+
+static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
+{
+	u32 data = 0x77777777;
+
+	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+	.num_ring_config = X2_NUM_RING_CONFIG,
+	.num_ring_id_shift = 13,
+	.setup = xgene_enet_setup_ring,
+	.clear = xgene_enet_clear_ring,
+	.wr_cmd = xgene_enet_wr_cmd,
+	.len = xgene_enet_ring_len,
+	.coalesce = xgene_enet_setup_coalescing,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644
index 0000000..8b235db
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG	6
+
+#define INTR_MBOX_SIZE		1024
+#define CSR_VMID0_INTR_MBOX	0x0270
+#define INTR_CLEAR		BIT(23)
+
+#define X2_MSG_AM_POS		10
+#define X2_QBASE_AM_POS		11
+#define X2_INTLINE_POS		24
+#define X2_INTLINE_LEN		5
+#define X2_CFGCRID_POS		29
+#define X2_CFGCRID_LEN		3
+#define X2_SELTHRSH_POS		7
+#define X2_SELTHRSH_LEN		3
+#define X2_RINGTYPE_POS		23
+#define X2_RINGTYPE_LEN		2
+#define X2_DEQINTEN_POS		29
+#define X2_RECOMTIMEOUT_POS	0
+#define X2_RECOMTIMEOUT_LEN	7
+#define X2_NUMMSGSINQ_POS	0
+#define X2_NUMMSGSINQ_LEN	17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
new file mode 100644
index 0000000..b1a83fd
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -0,0 +1,616 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
+{
+	iowrite32(val, p->eth_csr_addr + offset);
+}
+
+static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
+				     u32 val)
+{
+	iowrite32(val, p->base_addr + offset);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
+				  u32 offset, u32 val)
+{
+	iowrite32(val, p->eth_ring_if_addr + offset);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
+				   u32 offset, u32 val)
+{
+	iowrite32(val, p->eth_diag_csr_addr + offset);
+}
+
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
+{
+	return ioread32(p->eth_csr_addr + offset);
+}
+
+static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
+{
+	return ioread32(p->eth_diag_csr_addr + offset);
+}
+
+static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
+{
+	return ioread32(p->mcx_mac_csr_addr + offset);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
+{
+	struct net_device *ndev = p->ndev;
+	u32 data, shutdown;
+	int i = 0;
+
+	shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
+	data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
+
+	if (!shutdown && data == ~0U) {
+		netdev_dbg(ndev, "+ ecc_init done, skipping\n");
+		return 0;
+	}
+
+	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
+	do {
+		usleep_range(100, 110);
+		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
+		if (data == ~0U)
+			return 0;
+	} while (++i < 10);
+
+	netdev_err(ndev, "Failed to release memory from shutdown\n");
+	return -ENODEV;
+}
+
+static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+				     u32 *rx, u32 *tx)
+{
+	u32 addr, count;
+
+	addr = (pdata->enet_id != XGENE_ENET1) ?
+		XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
+		ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
+	count = xgene_enet_rd_mcx_csr(pdata, addr);
+	*rx = ICM_DROP_COUNT(count);
+	*tx = ECM_DROP_COUNT(count);
+	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+	addr = (pdata->enet_id != XGENE_ENET1) ?
+		XG_MCX_ECM_CONFIG0_REG_0_ADDR :
+		ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
+	xgene_enet_rd_mcx_csr(pdata, addr);
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
+{
+	u32 val;
+
+	val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
+	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
+}
+
+static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
+				u32 reg, u16 data)
+{
+	u32 addr, wr_data, done;
+	int i;
+
+	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
+	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
+
+	wr_data = PHY_CONTROL(data);
+	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);
+
+	for (i = 0; i < 10; i++) {
+		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
+		if (!(done & BUSY_MASK))
+			return;
+		usleep_range(10, 20);
+	}
+
+	netdev_err(p->ndev, "MII_MGMT write failed\n");
+}
+
+static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
+{
+	u32 addr, data, done;
+	int i;
+
+	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
+	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
+	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
+
+	for (i = 0; i < 10; i++) {
+		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
+		if (!(done & BUSY_MASK)) {
+			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
+			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
+
+			return data;
+		}
+		usleep_range(10, 20);
+	}
+
+	netdev_err(p->ndev, "MII_MGMT read failed\n");
+
+	return 0;
+}
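+
+/*
+ * Editor's note, worked example and not part of the patch: MII_MGMT_ADDRESS
+ * packs the PHY and register numbers with PHY_ADDR()/REG_ADDR() from
+ * xgene_enet_sgmac.h.  Reading SGMII_BASE_PAGE_ABILITY_ADDR (0x14) on the
+ * internal PHY (0x1e) encodes as (0x1e << 8) | (0x14 >> 2) = 0x1e05.
+ */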
+
+static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
+{
+	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
+	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
+}
+
+static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
+{
+	u32 addr0, addr1;
+	u8 *dev_addr = p->ndev->dev_addr;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);
+
+	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
+	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
+	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
+}
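+
+/*
+ * Editor's note, worked example and not part of the patch: the station
+ * address registers take the MAC little-end first.  For 00:01:73:d9:0e:b2,
+ * STATION_ADDR0 is written as 0xd9730100 and 0xb20e0000 is OR'ed into the
+ * top half of STATION_ADDR1, leaving that register's low 16 bits untouched.
+ */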
+
+static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
+{
+	u32 data;
+
+	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
+				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
+
+	if (LINK_SPEED(data) == PHY_SPEED_1000)
+		p->phy_speed = SPEED_1000;
+	else if (LINK_SPEED(data) == PHY_SPEED_100)
+		p->phy_speed = SPEED_100;
+	else
+		p->phy_speed = SPEED_10;
+
+	return data & LINK_UP;
+}
+
+static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
+{
+	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
+			    0x8000);
+	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
+	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
+}
+
+static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
+{
+	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
+			    0x8000);
+	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
+}
+
+static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
+{
+	u32 value;
+
+	if (p->phy_speed == SPEED_UNKNOWN)
+		return;
+
+	value = xgene_mii_phy_read(p, INT_PHY_ADDR,
+				   SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
+	if (!(value & LINK_UP))
+		xgene_sgmii_tbi_control_reset(p);
+}
+
+static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
+{
+	u32 icm0_addr, icm2_addr, debug_addr;
+	u32 icm0, icm2, intf_ctl;
+	u32 mc2, value;
+
+	xgene_sgmii_reset(p);
+
+	if (p->enet_id == XGENE_ENET1) {
+		icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
+		icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
+		debug_addr = DEBUG_REG_ADDR;
+	} else {
+		icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
+		icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
+		debug_addr = XG_DEBUG_REG_ADDR;
+	}
+
+	icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
+	icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
+	mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
+	intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);
+
+	switch (p->phy_speed) {
+	case SPEED_10:
+		ENET_INTERFACE_MODE2_SET(&mc2, 1);
+		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
+		CFG_MACMODE_SET(&icm0, 0);
+		CFG_WAITASYNCRD_SET(&icm2, 500);
+		break;
+	case SPEED_100:
+		ENET_INTERFACE_MODE2_SET(&mc2, 1);
+		intf_ctl &= ~ENET_GHD_MODE;
+		intf_ctl |= ENET_LHD_MODE;
+		CFG_MACMODE_SET(&icm0, 1);
+		CFG_WAITASYNCRD_SET(&icm2, 80);
+		break;
+	default:
+		ENET_INTERFACE_MODE2_SET(&mc2, 2);
+		intf_ctl &= ~ENET_LHD_MODE;
+		intf_ctl |= ENET_GHD_MODE;
+		CFG_MACMODE_SET(&icm0, 2);
+		CFG_WAITASYNCRD_SET(&icm2, 16);
+		value = xgene_enet_rd_csr(p, debug_addr);
+		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
+		xgene_enet_wr_csr(p, debug_addr, value);
+		break;
+	}
+
+	mc2 |= FULL_DUPLEX2 | PAD_CRC;
+	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
+	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
+	xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
+	xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
+}
+
+static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+}
+
+static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
+{
+	u32 data, loop = 10;
+
+	xgene_sgmii_configure(p);
+
+	while (loop--) {
+		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
+					  SGMII_STATUS_ADDR >> 2);
+		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
+		netdev_err(p->ndev, "Auto-negotiation failed\n");
+}
+
+static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
+
+	if (set)
+		data |= bits;
+	else
+		data &= ~bits;
+
+	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
+}
+
+static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
+{
+	xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);
+
+	p->mac_ops->enable_tx_pause(p, enable);
+}
+
+static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+	xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
+}
+
+static void xgene_sgmac_init(struct xgene_enet_pdata *p)
+{
+	u32 pause_thres_reg, pause_off_thres_reg;
+	u32 enet_spare_cfg_reg, rsif_config_reg;
+	u32 cfg_bypass_reg, rx_dv_gate_reg;
+	u32 data, data1, data2, offset;
+	u32 multi_dpf_reg;
+
+	if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
+		xgene_sgmac_reset(p);
+
+	xgene_sgmii_enable_autoneg(p);
+	xgene_sgmac_set_speed(p);
+	xgene_sgmac_set_mac_addr(p);
+
+	if (p->enet_id == XGENE_ENET1) {
+		enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+		rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+		cfg_bypass_reg = CFG_BYPASS_ADDR;
+		offset = p->port_id * OFFSET_4;
+		rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
+	} else {
+		enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+		rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+		cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+		rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+	}
+
+	data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
+	data |= MPA_IDLE_WITH_QMI_EMPTY;
+	xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
+
+	/* Adjust MDC clock frequency */
+	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
+	MGMT_CLOCK_SEL_SET(&data, 7);
+	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
+
+	/* Enable drop if bufpool not available */
+	data = xgene_enet_rd_csr(p, rsif_config_reg);
+	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+	xgene_enet_wr_csr(p, rsif_config_reg, data);
+
+	/* Configure HW pause frame generation */
+	multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
+			 XG_MCX_MULTI_DPF0_ADDR;
+	data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
+	data = (DEF_QUANTA << 16) | (data & 0xffff);
+	xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);
+
+	if (p->enet_id != XGENE_ENET1) {
+		data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
+		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xffff);
+		xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
+	}
+
+	pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
+			   XG_RXBUF_PAUSE_THRESH;
+	pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
+			       RXBUF_PAUSE_OFF_THRESH : 0;
+
+	if (p->enet_id == XGENE_ENET1) {
+		data1 = xgene_enet_rd_csr(p, pause_thres_reg);
+		data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);
+
+		if (!(p->port_id % 2)) {
+			data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
+			data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
+		} else {
+			data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
+			data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
+		}
+
+		xgene_enet_wr_csr(p, pause_thres_reg, data1);
+		xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
+	} else {
+		data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
+		xgene_enet_wr_csr(p, pause_thres_reg, data);
+	}
+
+	xgene_sgmac_flowctl_tx(p, p->tx_pause);
+	xgene_sgmac_flowctl_rx(p, p->rx_pause);
+
+	/* Bypass traffic gating */
+	xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+	xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+	xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
+}
+
+static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
+{
+	xgene_sgmac_rxtx(p, RX_EN, true);
+}
+
+static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
+{
+	xgene_sgmac_rxtx(p, TX_EN, true);
+}
+
+static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
+{
+	xgene_sgmac_rxtx(p, RX_EN, false);
+}
+
+static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
+{
+	xgene_sgmac_rxtx(p, TX_EN, false);
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
+{
+	struct device *dev = &p->pdev->dev;
+
+	if (!xgene_ring_mgr_init(p))
+		return -ENODEV;
+
+	if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
+		xgene_enet_config_ring_if_assoc(p);
+		return 0;
+	}
+
+	if (p->enet_id == XGENE_ENET2)
+		xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);
+
+	if (dev->of_node) {
+		if (!IS_ERR(p->clk)) {
+			clk_prepare_enable(p->clk);
+			udelay(5);
+			clk_disable_unprepare(p->clk);
+			udelay(5);
+			clk_prepare_enable(p->clk);
+			udelay(5);
+		}
+	} else {
+#ifdef CONFIG_ACPI
+		if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
+			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+					     "_RST", NULL, NULL);
+		else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+					     "_INI", NULL, NULL);
+#endif
+	}
+
+	if (!p->port_id) {
+		xgene_enet_ecc_init(p);
+		xgene_enet_config_ring_if_assoc(p);
+	}
+
+	return 0;
+}
+
+static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
+				  u32 dst_ring_num, u16 bufpool_id,
+				  u16 nxtbufpool_id)
+{
+	u32 cle_bypass_reg0, cle_bypass_reg1;
+	u32 offset = p->port_id * MAC_OFFSET;
+	u32 data, fpsel, nxtfpsel;
+
+	if (p->enet_id == XGENE_ENET1) {
+		cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+		cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+	} else {
+		cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+		cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+	}
+
+	data = CFG_CLE_BYPASS_EN0;
+	xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
+
+	fpsel = xgene_enet_get_fpsel(bufpool_id);
+	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
+	       CFG_CLE_NXTFPSEL0(nxtfpsel);
+	xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
+}
+
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+			     struct xgene_enet_desc_ring *ring)
+{
+	u32 addr, data;
+
+	if (xgene_enet_is_bufpool(ring->id)) {
+		addr = ENET_CFGSSQMIFPRESET_ADDR;
+		data = BIT(xgene_enet_get_fpsel(ring->id));
+	} else {
+		addr = ENET_CFGSSQMIWQRESET_ADDR;
+		data = BIT(xgene_enet_ring_bufnum(ring->id));
+	}
+
+	xgene_enet_wr_ring_if(pdata, addr, data);
+}
+
+static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
+{
+	struct device *dev = &p->pdev->dev;
+
+	if (dev->of_node) {
+		if (!IS_ERR(p->clk))
+			clk_disable_unprepare(p->clk);
+	}
+}
+
+static void xgene_enet_link_state(struct work_struct *work)
+{
+	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
+				     struct xgene_enet_pdata, link_work);
+	struct net_device *ndev = p->ndev;
+	u32 link, poll_interval;
+
+	link = xgene_enet_link_status(p);
+	if (link) {
+		if (!netif_carrier_ok(ndev)) {
+			netif_carrier_on(ndev);
+			xgene_sgmac_set_speed(p);
+			xgene_sgmac_rx_enable(p);
+			xgene_sgmac_tx_enable(p);
+			netdev_info(ndev, "Link is Up - %dMbps\n",
+				    p->phy_speed);
+		}
+		poll_interval = PHY_POLL_LINK_ON;
+	} else {
+		if (netif_carrier_ok(ndev)) {
+			xgene_sgmac_rx_disable(p);
+			xgene_sgmac_tx_disable(p);
+			netif_carrier_off(ndev);
+			netdev_info(ndev, "Link is Down\n");
+		}
+		poll_interval = PHY_POLL_LINK_OFF;
+	}
+
+	schedule_delayed_work(&p->link_work, poll_interval);
+}
+
+static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
+{
+	u32 data, ecm_cfg_addr;
+
+	if (p->enet_id == XGENE_ENET1) {
+		ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
+				CSR_ECM_CFG_1_ADDR;
+	} else {
+		ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
+	}
+
+	data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
+	if (enable)
+		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+	else
+		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+	xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
+}
+
+const struct xgene_mac_ops xgene_sgmac_ops = {
+	.init		= xgene_sgmac_init,
+	.reset		= xgene_sgmac_reset,
+	.rx_enable	= xgene_sgmac_rx_enable,
+	.tx_enable	= xgene_sgmac_tx_enable,
+	.rx_disable	= xgene_sgmac_rx_disable,
+	.tx_disable	= xgene_sgmac_tx_disable,
+	.get_drop_cnt   = xgene_sgmac_get_drop_cnt,
+	.set_speed	= xgene_sgmac_set_speed,
+	.set_mac_addr	= xgene_sgmac_set_mac_addr,
+	.set_framesize  = xgene_sgmac_set_frame_size,
+	.link_state	= xgene_enet_link_state,
+	.enable_tx_pause = xgene_sgmac_enable_tx_pause,
+	.flowctl_tx     = xgene_sgmac_flowctl_tx,
+	.flowctl_rx     = xgene_sgmac_flowctl_rx
+};
+
+const struct xgene_port_ops xgene_sgport_ops = {
+	.reset		= xgene_enet_reset,
+	.clear		= xgene_enet_clear,
+	.cle_bypass	= xgene_enet_cle_bypass,
+	.shutdown	= xgene_enet_shutdown
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
new file mode 100644
index 0000000..3d0ba37
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_SGMAC_H__
+#define __XGENE_ENET_SGMAC_H__
+
+#define PHY_ADDR(src)		(((src)<<8) & GENMASK(12, 8))
+#define REG_ADDR(src)		((src) & GENMASK(4, 0))
+#define PHY_CONTROL(src)	((src) & GENMASK(15, 0))
+#define LINK_SPEED(src)		(((src) & GENMASK(11, 10)) >> 10)
+#define INT_PHY_ADDR			0x1e
+#define SGMII_TBI_CONTROL_ADDR		0x44
+#define SGMII_CONTROL_ADDR		0x00
+#define SGMII_STATUS_ADDR		0x04
+#define SGMII_BASE_PAGE_ABILITY_ADDR	0x14
+#define AUTO_NEG_COMPLETE		BIT(5)
+#define LINK_STATUS			BIT(2)
+#define LINK_UP				BIT(15)
+#define MPA_IDLE_WITH_QMI_EMPTY		BIT(12)
+#define SG_RX_DV_GATE_REG_0_ADDR	0x05fc
+#define SGMII_EN			0x1
+
+enum xgene_phy_speed {
+	PHY_SPEED_10,
+	PHY_SPEED_100,
+	PHY_SPEED_1000
+};
+
+extern const struct xgene_mac_ops xgene_sgmac_ops;
+extern const struct xgene_port_ops xgene_sgport_ops;
+
+#endif  /* __XGENE_ENET_SGMAC_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
new file mode 100644
index 0000000..b7d75d0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -0,0 +1,544 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_ring_if_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
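+/*
+ * Indirect CSR access: latch the target address into the ADDR register
+ * and the payload into the WRITE register, then start the transaction
+ * through the COMMAND register.  Completion is polled via COMMAND_DONE
+ * for at most ~10us before giving up; reads (further below) use the
+ * same handshake with a READ data register instead.
+ */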
+static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
+				   void __iomem *cmd, void __iomem *cmd_done,
+				   u32 wr_addr, u32 wr_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(wr_addr, addr);
+	iowrite32(wr_data, wr);
+	iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+	/* wait for write command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
+			      u32 wr_addr, u32 wr_data)
+{
+	void __iomem *addr, *wr, *cmd, *cmd_done;
+
+	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
+	wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
+	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
+	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
+
+	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
+		netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
+			   wr_addr);
+}
+
+static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
+				   void __iomem *cmd, void __iomem *cmd_done,
+				   u32 rd_addr, u32 *rd_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(rd_addr, addr);
+	iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+	/* wait for read command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	*rd_data = ioread32(rd);
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
+			      u32 rd_addr, u32 *rd_data)
+{
+	void __iomem *addr, *rd, *cmd, *cmd_done;
+	bool success;
+
+	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
+	rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
+	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
+	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
+
+	success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done,
+					 rd_addr, rd_data);
+	if (!success)
+		netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
+			   rd_addr);
+
+	return success;
+}
+
+static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	u32 data;
+	u8 wait = 10;
+
+	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+	do {
+		usleep_range(100, 110);
+		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
+	} while ((data != 0xffffffff) && wait--);
+
+	if (data != 0xffffffff) {
+		netdev_err(ndev, "Failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
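+/*
+ * RX drops are counted by the ICM and TX drops by the ECM; both
+ * counters share a single CSR.  The trailing read of ECM_CONFIG0 is
+ * the workaround for errata 10GE_4, as the drop-count register is not
+ * clear-on-read.
+ */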
+static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+				     u32 *rx, u32 *tx)
+{
+	u32 count;
+
+	xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
+	*rx = ICM_DROP_COUNT(count);
+	*tx = ECM_DROP_COUNT(count);
+	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+	xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
+}
+
+static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
+}
+
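+/* Pulse PCS_CTRL_PCS_RST with a read-modify-write of PCS_CONTROL_1;
+ * skipped when the indirect read fails (xgene_enet_rd_pcs logs it).
+ */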
+static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
+		return;
+
+	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
+	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
+}
+
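+/* Station address: bytes 0-3 are packed little-endian into the LSW
+ * register, bytes 4-5 into the upper halfword of the MSW register.
+ */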
+static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+{
+	u32 addr0, addr1;
+	u8 *dev_addr = pdata->ndev->dev_addr;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
+	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
+}
+
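+/*
+ * Four TSO MSS slots live in two TSIF registers, two 16-bit fields
+ * each: indexes 0/1 in REG0 and 2/3 in REG0 + 4.  Even indexes take
+ * the low halfword (TSO_MSS0), odd indexes the high halfword
+ * (TSO_MSS1); the neighbouring field is read back and preserved.
+ */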
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
+				u16 mss, u8 index)
+{
+	u8 offset;
+	u32 data;
+
+	offset = (index < 2) ? 0 : 4;
+	xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);
+
+	if (!(index & 0x1))
+		data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
+			SET_VAL(TSO_MSS0, mss);
+	else
+		data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);
+
+	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
+}
+
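+/* The low halfword programs the maximum frame length in bytes; the
+ * upper halfword is set to (size + 2) / 4, which appears to be the
+ * same limit expressed in 32-bit words.
+ */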
+static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
+			  ((((size + 2) >> 2) << 16) | size));
+}
+
+static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);
+
+	return data;
+}
+
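+/* MULTI_DPF_AUTOCTRL and PAUSE_XON_EN in the ECM config register let
+ * the hardware emit XOFF/XON pause frames on its own, presumably
+ * against the RX buffer thresholds programmed in xgene_xgmac_init().
+ */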
+static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
+					bool enable)
+{
+	u32 data;
+
+	xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);
+
+	if (enable)
+		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+	else
+		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+
+	xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
+}
+
+static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+
+	if (enable)
+		data |= HSTTCTLEN;
+	else
+		data &= ~HSTTCTLEN;
+
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+	pdata->mac_ops->enable_tx_pause(pdata, enable);
+}
+
+static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+
+	if (enable)
+		data |= HSTRCTLEN;
+	else
+		data &= ~HSTRCTLEN;
+
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+}
+
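+/*
+ * One-time MAC bring-up: reset the MAC, enable frame padding, disable
+ * the length check, program the station address, apply the errata
+ * 10GE_1 FIFO-threshold fixups, then set up pause quanta/thresholds
+ * and the requested flow-control state.
+ */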
+static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_xgmac_reset(pdata);
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+	data |= HSTPPEN;
+	data &= ~HSTLENCHK;
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+	xgene_xgmac_set_mac_addr(pdata);
+
+	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
+	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+	/* Errata 10GE_1 - FIFO threshold default value incorrect */
+	RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
+	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
+
+	/* Errata 10GE_1 - FIFO threshold default value incorrect */
+	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
+	RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
+	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);
+
+	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
+	data |= BIT(12);
+	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
+	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
+	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
+	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
+
+	/* Configure HW pause frame generation */
+	xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
+	data = (DEF_QUANTA << 16) | (data & 0xFFFF);
+	xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);
+
+	if (pdata->enet_id != XGENE_ENET1) {
+		xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
+		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
+		xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
+	}
+
+	data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
+	xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);
+
+	xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
+	xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
+}
+
+static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
+}
+
+static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
+}
+
+static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
+}
+
+static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
+}
+
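+/*
+ * The reset path depends on the firmware: DT platforms bounce the
+ * ENET clock (enable, disable, re-enable with 5us settling delays),
+ * while ACPI platforms invoke the _RST method and fall back to _INI
+ * when _RST is absent.
+ */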
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
+	if (dev->of_node) {
+		clk_prepare_enable(pdata->clk);
+		udelay(5);
+		clk_disable_unprepare(pdata->clk);
+		udelay(5);
+		clk_prepare_enable(pdata->clk);
+		udelay(5);
+	} else {
+#ifdef CONFIG_ACPI
+		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
+			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+					     "_RST", NULL, NULL);
+		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
+					   "_INI")) {
+			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+					     "_INI", NULL, NULL);
+		}
+#endif
+	}
+
+	xgene_enet_ecc_init(pdata);
+	xgene_enet_config_ring_if_assoc(pdata);
+
+	return 0;
+}
+
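+/* Classifier (CLE) bypass: enable bypass in REG0 and program the
+ * destination ring and buffer-pool selections directly in REG1, so
+ * RX traffic skips the classification engine.
+ */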
+static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
+				    u32 dst_ring_num, u16 bufpool_id,
+				    u16 nxtbufpool_id)
+{
+	u32 cb, fpsel, nxtfpsel;
+
+	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
+	cb |= CFG_CLE_BYPASS_EN0;
+	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
+
+	fpsel = xgene_enet_get_fpsel(bufpool_id);
+	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
+	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
+	CFG_CLE_FPSEL0_SET(&cb, fpsel);
+	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
+	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+	pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
+}
+
+static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	if (dev->of_node) {
+		if (!IS_ERR(pdata->clk))
+			clk_disable_unprepare(pdata->clk);
+	}
+}
+
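+/* Reset one ring's QMI state: buffer pools go through the FPRESET
+ * register (bit indexed by fpsel), work queues through WQRESET (bit
+ * indexed by ring buffer number).
+ */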
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+			     struct xgene_enet_desc_ring *ring)
+{
+	u32 addr, data;
+
+	if (xgene_enet_is_bufpool(ring->id)) {
+		addr = ENET_CFGSSQMIFPRESET_ADDR;
+		data = BIT(xgene_enet_get_fpsel(ring->id));
+	} else {
+		addr = ENET_CFGSSQMIWQRESET_ADDR;
+		data = BIT(xgene_enet_ring_bufnum(ring->id));
+	}
+
+	xgene_enet_wr_ring_if(pdata, addr, data);
+}
+
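+/* The SFP ready pin may be described as either "rxlos" or "sfp" in
+ * the firmware; try "rxlos" first.
+ */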
+static int xgene_enet_gpio_lookup(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+
+	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
+	if (IS_ERR(pdata->sfp_rdy))
+		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
+
+	if (IS_ERR(pdata->sfp_rdy))
+		return -ENODEV;
+
+	return 0;
+}
+
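+/*
+ * Periodic link poll (there is no PHY interrupt here): a deasserted
+ * SFP ready GPIO overrides the CSR link status, and while the link is
+ * down the PCS is reset on every poll, presumably to restart link
+ * training.
+ */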
+static void xgene_enet_link_state(struct work_struct *work)
+{
+	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
+					 struct xgene_enet_pdata, link_work);
+	struct net_device *ndev = pdata->ndev;
+	u32 link_status, poll_interval;
+
+	link_status = xgene_enet_link_status(pdata);
+	if (pdata->sfp_gpio_en && link_status &&
+	    (!IS_ERR(pdata->sfp_rdy) || !xgene_enet_gpio_lookup(pdata)) &&
+	    !gpiod_get_value(pdata->sfp_rdy))
+		link_status = 0;
+
+	if (link_status) {
+		if (!netif_carrier_ok(ndev)) {
+			netif_carrier_on(ndev);
+			xgene_xgmac_rx_enable(pdata);
+			xgene_xgmac_tx_enable(pdata);
+			netdev_info(ndev, "Link is Up - 10Gbps\n");
+		}
+		poll_interval = PHY_POLL_LINK_ON;
+	} else {
+		if (netif_carrier_ok(ndev)) {
+			xgene_xgmac_rx_disable(pdata);
+			xgene_xgmac_tx_disable(pdata);
+			netif_carrier_off(ndev);
+			netdev_info(ndev, "Link is Down\n");
+		}
+		poll_interval = PHY_POLL_LINK_OFF;
+
+		xgene_pcs_reset(pdata);
+	}
+
+	schedule_delayed_work(&pdata->link_work, poll_interval);
+}
+
+const struct xgene_mac_ops xgene_xgmac_ops = {
+	.init = xgene_xgmac_init,
+	.reset = xgene_xgmac_reset,
+	.rx_enable = xgene_xgmac_rx_enable,
+	.tx_enable = xgene_xgmac_tx_enable,
+	.rx_disable = xgene_xgmac_rx_disable,
+	.tx_disable = xgene_xgmac_tx_disable,
+	.set_mac_addr = xgene_xgmac_set_mac_addr,
+	.set_framesize = xgene_xgmac_set_frame_size,
+	.set_mss = xgene_xgmac_set_mss,
+	.get_drop_cnt = xgene_xgmac_get_drop_cnt,
+	.link_state = xgene_enet_link_state,
+	.enable_tx_pause = xgene_xgmac_enable_tx_pause,
+	.flowctl_rx = xgene_xgmac_flowctl_rx,
+	.flowctl_tx = xgene_xgmac_flowctl_tx,
+};
+
+const struct xgene_port_ops xgene_xgport_ops = {
+	.reset = xgene_enet_reset,
+	.clear = xgene_enet_clear,
+	.cle_bypass = xgene_enet_xgcle_bypass,
+	.shutdown = xgene_enet_shutdown,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
new file mode 100644
index 0000000..a3b4551
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -0,0 +1,101 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_XGMAC_H__
+#define __XGENE_ENET_XGMAC_H__
+
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET	0x3000
+#define BLOCK_AXG_MAC_OFFSET		0x0800
+#define BLOCK_AXG_STATS_OFFSET		0x0800
+#define BLOCK_AXG_MAC_CSR_OFFSET	0x2000
+#define BLOCK_PCS_OFFSET		0x3800
+
+#define XGENET_CONFIG_REG_ADDR		0x20
+#define XGENET_SRST_ADDR		0x00
+#define XGENET_CLKEN_ADDR		0x08
+
+#define CSR_CLK		BIT(0)
+#define XGENET_CLK	BIT(1)
+#define PCS_CLK		BIT(3)
+#define AN_REF_CLK	BIT(4)
+#define AN_CLK		BIT(5)
+#define AD_CLK		BIT(6)
+
+#define CSR_RST		BIT(0)
+#define XGENET_RST	BIT(1)
+#define PCS_RST		BIT(3)
+#define AN_REF_RST	BIT(4)
+#define AN_RST		BIT(5)
+#define AD_RST		BIT(6)
+
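+/* AXGMAC core registers: HSTMACRST lives in AXGMAC_CONFIG_0; the
+ * other HST* bits used by this driver are fields of AXGMAC_CONFIG_1.
+ */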
+#define AXGMAC_CONFIG_0			0x0000
+#define AXGMAC_CONFIG_1			0x0004
+#define HSTMACRST			BIT(31)
+#define HSTTCTLEN			BIT(31)
+#define HSTTFEN				BIT(30)
+#define HSTRCTLEN			BIT(29)
+#define HSTRFEN				BIT(28)
+#define HSTPPEN				BIT(7)
+#define HSTDRPLT64			BIT(5)
+#define HSTLENCHK			BIT(3)
+#define HSTMACADR_LSW_ADDR		0x0010
+#define HSTMACADR_MSW_ADDR		0x0014
+#define HSTMAXFRAME_LENGTH_ADDR		0x0020
+
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR	0x0004
+#define XG_MCX_ECM_CFG_0_ADDR		0x0074
+#define XG_MCX_MULTI_DPF0_ADDR		0x007c
+#define XG_MCX_MULTI_DPF1_ADDR		0x0080
+#define XG_DEF_PAUSE_THRES		0x390
+#define XG_DEF_PAUSE_OFF_THRES		0x2c0
+#define XG_RSIF_CONFIG_REG_ADDR		0x00a0
+#define XG_RSIF_CLE_BUFF_THRESH		0x3
+#define RSIF_CLE_BUFF_THRESH_SET(dst, val)	xgene_set_bits(dst, val, 0, 3)
+#define XG_RSIF_CONFIG1_REG_ADDR	0x00b8
+#define XG_RSIF_PLC_CLE_BUFF_THRESH	0x1
+#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val)	xgene_set_bits(dst, val, 0, 2)
+#define XG_MCX_ECM_CONFIG0_REG_0_ADDR	0x0070
+#define XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR	0x0124
+#define XCLE_BYPASS_REG0_ADDR		0x0160
+#define XCLE_BYPASS_REG1_ADDR		0x0164
+#define XG_CFG_BYPASS_ADDR		0x0204
+#define XG_CFG_LINK_AGGR_RESUME_0_ADDR	0x0214
+#define XG_LINK_STATUS_ADDR		0x0228
+#define XG_TSIF_MSS_REG0_ADDR		0x02a4
+#define XG_DEBUG_REG_ADDR		0x0400
+#define XG_ENET_SPARE_CFG_REG_ADDR	0x040c
+#define XG_ENET_SPARE_CFG_REG_1_ADDR	0x0410
+#define XGENET_RX_DV_GATE_REG_0_ADDR	0x0804
+#define XGENET_ECM_CONFIG0_REG_0	0x0870
+#define XGENET_ICM_ECM_DROP_COUNT_REG0	0x0924
+#define XGENET_CSR_ECM_CFG_0_ADDR	0x0880
+#define XGENET_CSR_MULTI_DPF0_ADDR	0x0888
+#define XGENET_CSR_MULTI_DPF1_ADDR	0x088c
+#define XG_RXBUF_PAUSE_THRESH		0x0020
+#define XG_MCX_ICM_CONFIG0_REG_0_ADDR	0x00e0
+#define XG_MCX_ICM_CONFIG2_REG_0_ADDR	0x00e8
+
+#define PCS_CONTROL_1			0x0000
+#define PCS_CTRL_PCS_RST		BIT(15)
+
+extern const struct xgene_mac_ops xgene_xgmac_ops;
+extern const struct xgene_port_ops xgene_xgport_ops;
+
+#endif /* __XGENE_ENET_XGMAC_H__ */