Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a580a3d..6a7e899 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Freescale device configuration
 #
@@ -96,5 +97,7 @@
 	  on the 8540.
 
 source "drivers/net/ethernet/freescale/dpaa/Kconfig"
+source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
+source "drivers/net/ethernet/freescale/enetc/Kconfig"
 
 endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 0914a3e..6a93293 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -21,3 +21,8 @@
 
 obj-$(CONFIG_FSL_FMAN) += fman/
 obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+
+obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index a654736..3b32573 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menuconfig FSL_DPAA_ETH
 	tristate "DPAA Ethernet"
 	depends on FSL_DPAA && FSL_FMAN
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 65a22cd..b4b82b9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -51,9 +51,9 @@
 #include <linux/percpu.h>
 #include <linux/dma-mapping.h>
 #include <linux/sort.h>
+#include <linux/phy_fixed.h>
 #include <soc/fsl/bman.h>
 #include <soc/fsl/qman.h>
-
 #include "fman.h"
 #include "fman_port.h"
 #include "mac.h"
@@ -485,7 +485,7 @@
 static bool dpaa_bpid2pool_use(int bpid)
 {
 	if (dpaa_bpid2pool(bpid)) {
-		atomic_inc(&dpaa_bp_array[bpid]->refs);
+		refcount_inc(&dpaa_bp_array[bpid]->refs);
 		return true;
 	}
 
@@ -496,7 +496,7 @@
 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
 {
 	dpaa_bp_array[bpid] = dpaa_bp;
-	atomic_set(&dpaa_bp->refs, 1);
+	refcount_set(&dpaa_bp->refs, 1);
 }
 
 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +584,7 @@
 	if (!bp)
 		return;
 
-	if (!atomic_dec_and_test(&bp->refs))
+	if (!refcount_dec_and_test(&bp->refs))
 		return;
 
 	if (bp->free_buf_cb)
@@ -780,7 +780,7 @@
 	struct qman_portal *portal;
 	int cpu;
 
-	for_each_cpu(cpu, cpus) {
+	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
 		portal = qman_get_affine_portal(cpu);
 		qman_p_static_dequeue_add(portal, pool);
 	}
@@ -896,7 +896,7 @@
 	u16 channels[NR_CPUS];
 	struct dpaa_fq *fq;
 
-	for_each_cpu(cpu, affine_cpus)
+	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
 		channels[num_portals++] = qman_affine_channel(cpu);
 
 	if (num_portals == 0)
@@ -1280,7 +1280,7 @@
 
 	err = bman_release(dpaa_bp->pool, bmb, cnt);
 	/* Should never occur, address anyway to avoid leaking the buffers */
-	if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+	if (WARN_ON(err) && dpaa_bp->free_buf_cb)
 		while (cnt-- > 0)
 			dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
 
@@ -1648,7 +1648,7 @@
 				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
 		/* remaining pages were mapped with skb_frag_dma_map() */
-		for (i = 1; i < nr_frags; i++) {
+		for (i = 1; i <= nr_frags; i++) {
 			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
 			dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
@@ -1704,10 +1704,8 @@
 
 	skb = build_skb(vaddr, dpaa_bp->size +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-	if (unlikely(!skb)) {
-		WARN_ONCE(1, "Build skb failure on Rx\n");
+	if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
 		goto free_buffer;
-	}
 	WARN_ON(fd_off != priv->rx_headroom);
 	skb_reserve(skb, fd_off);
 	skb_put(skb, qm_fd_get_length(fd));
@@ -1770,7 +1768,7 @@
 			sz = dpaa_bp->size +
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 			skb = build_skb(sg_vaddr, sz);
-			if (WARN_ON(unlikely(!skb)))
+			if (WARN_ON(!skb))
 				goto free_buffers;
 
 			skb->ip_summed = rx_csum_offload(priv, fd);
@@ -1960,7 +1958,7 @@
 	/* populate the rest of SGT entries */
 	for (i = 0; i < nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
-		frag_len = frag->size;
+		frag_len = skb_frag_size(frag);
 		WARN_ON(!skb_frag_page(frag));
 		addr = skb_frag_dma_map(dev, frag, 0,
 					frag_len, dma_dir);
@@ -2046,12 +2044,14 @@
 	return 0;
 }
 
-static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t
+dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
 	const int queue_mapping = skb_get_queue_mapping(skb);
 	bool nonlinear = skb_is_nonlinear(skb);
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa_percpu_priv *percpu_priv;
+	struct netdev_queue *txq;
 	struct dpaa_priv *priv;
 	struct qm_fd fd;
 	int offset = 0;
@@ -2101,6 +2101,11 @@
 	if (unlikely(err < 0))
 		goto skb_to_fd_failed;
 
+	txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+	/* LLTX requires us to update trans_start ourselves */
+	txq->trans_start = jiffies;
+
 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -2169,7 +2174,6 @@
 	if (cleaned < budget) {
 		napi_complete_done(napi, cleaned);
 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-
 	} else if (np->down) {
 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
 	}
@@ -2443,7 +2447,7 @@
 	struct dpaa_percpu_priv *percpu_priv;
 	int i;
 
-	for_each_possible_cpu(i) {
+	for_each_online_cpu(i) {
 		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
 
 		percpu_priv->np.down = 0;
@@ -2456,7 +2460,7 @@
 	struct dpaa_percpu_priv *percpu_priv;
 	int i;
 
-	for_each_possible_cpu(i) {
+	for_each_online_cpu(i) {
 		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
 
 		percpu_priv->np.down = 1;
@@ -2476,6 +2480,7 @@
 
 static int dpaa_phy_init(struct net_device *net_dev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	struct mac_device *mac_dev;
 	struct phy_device *phy_dev;
 	struct dpaa_priv *priv;
@@ -2492,9 +2497,10 @@
 	}
 
 	/* Remove any features not supported by the controller */
-	phy_dev->supported &= mac_dev->if_support;
-	phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-	phy_dev->advertising = phy_dev->supported;
+	ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
+	linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+
+	phy_support_asym_pause(phy_dev);
 
 	mac_dev->phy_dev = phy_dev;
 	net_dev->phydev = phy_dev;
@@ -2615,6 +2621,7 @@
 	.ndo_stop = dpaa_eth_stop,
 	.ndo_tx_timeout = dpaa_tx_timeout,
 	.ndo_get_stats64 = dpaa_get_stats64,
+	.ndo_change_carrier = fixed_phy_change_carrier,
 	.ndo_set_mac_address = dpaa_set_mac_address,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_rx_mode = dpaa_set_rx_mode,
@@ -2733,8 +2740,6 @@
 	return err;
 }
 
-static const struct of_device_id dpaa_match[];
-
 static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 {
 	u16 headroom;
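The dpaa_phy_init() hunk above drops the legacy u32 supported/advertising masks in favour of phylib link-mode bitmaps, which are not limited to 32 bits. A minimal sketch of that conversion pattern, assuming a driver-private u32 capability mask (the function name below is illustrative, not from the patch):

#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/phy.h>

static void example_restrict_phy_modes(struct phy_device *phydev,
					u32 if_support)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Expand the legacy SUPPORTED_* u32 into a link-mode bitmap */
	ethtool_convert_legacy_u32_to_link_mode(mask, if_support);

	/* Keep only the modes supported by both the PHY and the MAC */
	linkmode_and(phydev->supported, phydev->supported, mask);

	/* Pause/Asym_Pause are no longer OR'ed in by hand; phylib adds
	 * them to the supported and advertising bitmaps.
	 */
	phy_support_asym_pause(phydev);
}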
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index af320f8..f7e59e8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -32,6 +32,7 @@
 #define __DPAA_H
 
 #include <linux/netdevice.h>
+#include <linux/refcount.h>
 #include <soc/fsl/qman.h>
 #include <soc/fsl/bman.h>
 
@@ -99,7 +100,7 @@
 	int (*seed_cb)(struct dpaa_bp *);
 	/* bpool can be emptied before freeing by this cb */
 	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
-	atomic_t refs;
+	refcount_t refs;
 };
 
 struct dpaa_rx_errors {
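The refs counter above moves from atomic_t to refcount_t, which saturates instead of wrapping and warns on misuse such as incrementing from zero. A minimal sketch of the lifecycle the dpaa_eth.c hunks implement, using a hypothetical struct as a stand-in for struct dpaa_bp:

#include <linux/refcount.h>

struct bp_example {			/* hypothetical stand-in for struct dpaa_bp */
	refcount_t refs;
};

static void bp_example_map(struct bp_example *bp)
{
	refcount_set(&bp->refs, 1);	/* first user: initialize to 1 */
}

static void bp_example_reuse(struct bp_example *bp)
{
	refcount_inc(&bp->refs);	/* pool already exists, add a user */
}

static void bp_example_free(struct bp_example *bp)
{
	if (!refcount_dec_and_test(&bp->refs))
		return;			/* still referenced elsewhere */
	/* last reference dropped: safe to drain and free the pool here */
}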
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 3184c8f..7ce2e99 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -182,7 +182,6 @@
 	struct phy_device *phydev;
 	bool rx_pause, tx_pause;
 	struct dpaa_priv *priv;
-	u32 newadv, oldadv;
 	int err;
 
 	priv = netdev_priv(net_dev);
@@ -194,9 +193,7 @@
 		return -ENODEV;
 	}
 
-	if (!(phydev->supported & SUPPORTED_Pause) ||
-	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
-	    (epause->rx_pause != epause->tx_pause)))
+	if (!phy_validate_pause(phydev, epause))
 		return -EINVAL;
 
 	/* The MAC should know how to handle PAUSE frame autonegotiation before
@@ -210,29 +207,8 @@
 	/* Determine the sym/asym advertised PAUSE capabilities from the desired
 	 * rx/tx pause settings.
 	 */
-	newadv = 0;
-	if (epause->rx_pause)
-		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-	if (epause->tx_pause)
-		newadv ^= ADVERTISED_Asym_Pause;
 
-	oldadv = phydev->advertising &
-			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-
-	/* If there are differences between the old and the new advertised
-	 * values, restart PHY autonegotiation and advertise the new values.
-	 */
-	if (oldadv != newadv) {
-		phydev->advertising &= ~(ADVERTISED_Pause
-				| ADVERTISED_Asym_Pause);
-		phydev->advertising |= newadv;
-		if (phydev->autoneg) {
-			err = phy_start_aneg(phydev);
-			if (err < 0)
-				netdev_err(net_dev, "phy_start_aneg() = %d\n",
-					   err);
-		}
-	}
+	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
 
 	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
 	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
@@ -525,7 +501,7 @@
 	struct device_node *mac_node = dev->of_node;
 	struct device_node *fman_node = NULL, *ptp_node = NULL;
 	struct platform_device *ptp_dev = NULL;
-	struct qoriq_ptp *ptp = NULL;
+	struct ptp_qoriq *ptp = NULL;
 
 	info->phc_index = -1;
 
@@ -553,6 +529,75 @@
 	return 0;
 }
 
+static int dpaa_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *c)
+{
+	struct qman_portal *portal;
+	u32 period;
+	u8 thresh;
+
+	portal = qman_get_affine_portal(smp_processor_id());
+	qman_portal_get_iperiod(portal, &period);
+	qman_dqrr_get_ithresh(portal, &thresh);
+
+	c->rx_coalesce_usecs = period;
+	c->rx_max_coalesced_frames = thresh;
+	c->use_adaptive_rx_coalesce = false;
+
+	return 0;
+}
+
+static int dpaa_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *c)
+{
+	const cpumask_t *cpus = qman_affine_cpus();
+	bool needs_revert[NR_CPUS] = {false};
+	struct qman_portal *portal;
+	u32 period, prev_period;
+	u8 thresh, prev_thresh;
+	int cpu, res;
+
+	if (c->use_adaptive_rx_coalesce)
+		return -EINVAL;
+
+	period = c->rx_coalesce_usecs;
+	thresh = c->rx_max_coalesced_frames;
+
+	/* save previous values */
+	portal = qman_get_affine_portal(smp_processor_id());
+	qman_portal_get_iperiod(portal, &prev_period);
+	qman_dqrr_get_ithresh(portal, &prev_thresh);
+
+	/* set new values */
+	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
+		portal = qman_get_affine_portal(cpu);
+		res = qman_portal_set_iperiod(portal, period);
+		if (res)
+			goto revert_values;
+		res = qman_dqrr_set_ithresh(portal, thresh);
+		if (res) {
+			qman_portal_set_iperiod(portal, prev_period);
+			goto revert_values;
+		}
+		needs_revert[cpu] = true;
+	}
+
+	return 0;
+
+revert_values:
+	/* restore previous values */
+	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
+		if (!needs_revert[cpu])
+			continue;
+		portal = qman_get_affine_portal(cpu);
+		/* restoring previous values will not fail, ignore return value */
+		qman_portal_set_iperiod(portal, prev_period);
+		qman_dqrr_set_ithresh(portal, prev_thresh);
+	}
+
+	return res;
+}
+
 const struct ethtool_ops dpaa_ethtool_ops = {
 	.get_drvinfo = dpaa_get_drvinfo,
 	.get_msglevel = dpaa_get_msglevel,
@@ -569,4 +614,6 @@
 	.get_rxnfc = dpaa_get_rxnfc,
 	.set_rxnfc = dpaa_set_rxnfc,
 	.get_ts_info = dpaa_get_ts_info,
+	.get_coalesce = dpaa_get_coalesce,
+	.set_coalesce = dpaa_set_coalesce,
 };
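Two changes stand out in this file: dpaa_set_pauseparam() now delegates pause validation and advertisement to phylib, and the new get/set_coalesce hooks map ethtool's rx-usecs/rx-frames onto the QMan portal interrupt period and DQRR threshold of every affine portal, reverting all portals if any update fails. A minimal sketch of the simplified pause flow (the function name is illustrative, not from the patch):

#include <linux/ethtool.h>
#include <linux/phy.h>

static int example_set_pause(struct phy_device *phydev,
			     struct ethtool_pauseparam *epause)
{
	/* Reject combinations the PHY cannot advertise, e.g. rx != tx
	 * pause on a PHY without asymmetric pause support.
	 */
	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	/* Update the advertised Pause/Asym_Pause bits; phylib restarts
	 * autonegotiation if they changed.
	 */
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

	return 0;
}

The coalesce settings are driven through the standard interface, e.g. ethtool -C <iface> rx-usecs 64 rx-frames 16.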
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
new file mode 100644
index 0000000..fbef282
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config FSL_DPAA2_ETH
+	tristate "Freescale DPAA2 Ethernet"
+	depends on FSL_MC_BUS && FSL_MC_DPIO
+	help
+	  This is the DPAA2 Ethernet driver supporting Freescale SoCs
+	  with DPAA2 (DataPath Acceleration Architecture v2).
+	  The driver manages network objects discovered on the Freescale
+	  MC bus.
+
+config FSL_DPAA2_PTP_CLOCK
+	tristate "Freescale DPAA2 PTP Clock"
+	depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
+	default y
+	help
+	  This driver adds support for using the DPAA2 1588 timer module
+	  as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
new file mode 100644
index 0000000..d1e78cd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH)		+= fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK)	+= fsl-dpaa2-ptp.o
+
+fsl-dpaa2-eth-objs	:= dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
+fsl-dpaa2-ptp-objs	:= dpaa2-ptp.o dprtc.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
new file mode 100644
index 0000000..a9afe46
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	struct rtnl_link_stats64 *stats;
+	struct dpaa2_eth_drv_stats *extras;
+	int i;
+
+	seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+		   "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+		   "Tx SG", "Tx realloc", "Enq busy");
+
+	for_each_online_cpu(i) {
+		stats = per_cpu_ptr(priv->percpu_stats, i);
+		extras = per_cpu_ptr(priv->percpu_extras, i);
+		seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+			   i,
+			   stats->rx_packets,
+			   stats->rx_errors,
+			   extras->rx_sg_frames,
+			   stats->tx_packets,
+			   stats->tx_errors,
+			   extras->tx_conf_frames,
+			   extras->tx_sg_frames,
+			   extras->tx_reallocs,
+			   extras->tx_portal_busy);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_cpu_show, priv);
+	if (err < 0)
+		netdev_err(priv->net_dev, "single_open() failed\n");
+
+	return err;
+}
+
+static const struct file_operations dpaa2_dbg_cpu_ops = {
+	.open = dpaa2_dbg_cpu_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+	switch (fq->type) {
+	case DPAA2_RX_FQ:
+		return "Rx";
+	case DPAA2_TX_CONF_FQ:
+		return "Tx conf";
+	default:
+		return "N/A";
+	}
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	struct dpaa2_eth_fq *fq;
+	u32 fcnt, bcnt;
+	int i, err;
+
+	seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s\n",
+		   "VFQID", "CPU", "Type", "Frames", "Pending frames");
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+		if (err)
+			fcnt = 0;
+
+		seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+			   fq->fqid,
+			   fq->target_cpu,
+			   fq_type_to_str(fq),
+			   fq->stats.frames,
+			   fcnt);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_fqs_show, priv);
+	if (err < 0)
+		netdev_err(priv->net_dev, "single_open() failed\n");
+
+	return err;
+}
+
+static const struct file_operations dpaa2_dbg_fq_ops = {
+	.open = dpaa2_dbg_fqs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+	seq_printf(file, "%s%16s%16s%16s%16s\n",
+		   "CHID", "CPU", "Deq busy", "CDANs", "Buf count");
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		seq_printf(file, "%4d%16d%16llu%16llu%16d\n",
+			   ch->ch_id,
+			   ch->nctx.desired_cpu,
+			   ch->stats.dequeue_portal_busy,
+			   ch->stats.cdan,
+			   ch->buf_count);
+	}
+
+	return 0;
+}
+
+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+	err = single_open(file, dpaa2_dbg_ch_show, priv);
+	if (err < 0)
+		netdev_err(priv->net_dev, "single_open() failed\n");
+
+	return err;
+}
+
+static const struct file_operations dpaa2_dbg_ch_ops = {
+	.open = dpaa2_dbg_ch_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+	struct dentry *dir;
+
+	/* Create a directory for the interface */
+	dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root);
+	priv->dbg.dir = dir;
+
+	/* per-cpu stats file */
+	debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
+
+	/* per-fq stats file */
+	debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
+
+	/* per-channel stats file */
+	debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+	debugfs_remove_recursive(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+	dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+	pr_debug("DPAA2-ETH: debugfs created\n");
+}
+
+void dpaa2_eth_dbg_exit(void)
+{
+	debugfs_remove(dpaa2_dbg_root);
+}
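Assuming debugfs is mounted at the usual /sys/kernel/debug, the files created above appear as /sys/kernel/debug/dpaa2-eth/<netdev>/{cpu_stats,fq_stats,ch_stats}; each file is backed by the corresponding seq_file show routine.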
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
new file mode 100644
index 0000000..15598b2
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+	struct dentry *dir;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
new file mode 100644
index 0000000..9801528
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM	dpaa2_eth
+
+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA2_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa2-eth.h"
+#include <linux/tracepoint.h>
+
+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
+/* trace_printk format for raw buffer event class */
+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
+/* This is used to declare a class of events.
+ * individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor */
+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
+		    /* Trace function prototype */
+		    TP_PROTO(struct net_device *netdev,
+			     const struct dpaa2_fd *fd),
+
+		    /* Repeat argument list here */
+		    TP_ARGS(netdev, fd),
+
+		    /* A structure containing the relevant information we want
+		     * to record. Declare name and type for each normal element,
+		     * name, type and size for arrays. Use __string for variable
+		     * length strings.
+		     */
+		    TP_STRUCT__entry(
+				     __field(u64, fd_addr)
+				     __field(u32, fd_len)
+				     __field(u16, fd_offset)
+				     __string(name, netdev->name)
+		    ),
+
+		    /* The function that assigns values to the above declared
+		     * fields
+		     */
+		    TP_fast_assign(
+				   __entry->fd_addr = dpaa2_fd_get_addr(fd);
+				   __entry->fd_len = dpaa2_fd_get_len(fd);
+				   __entry->fd_offset = dpaa2_fd_get_offset(fd);
+				   __assign_str(name, netdev->name);
+		    ),
+
+		    /* This is what gets printed when the trace event is
+		     * triggered.
+		     */
+		    TP_printk(TR_FMT,
+			      __get_str(name),
+			      __entry->fd_addr,
+			      __entry->fd_len,
+			      __entry->fd_offset)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
+	     TP_PROTO(struct net_device *netdev,
+		      const struct dpaa2_fd *fd),
+
+	     TP_ARGS(netdev, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
+	     TP_PROTO(struct net_device *netdev,
+		      const struct dpaa2_fd *fd),
+
+	     TP_ARGS(netdev, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
+	     TP_PROTO(struct net_device *netdev,
+		      const struct dpaa2_fd *fd),
+
+	     TP_ARGS(netdev, fd)
+);
+
+/* Log data about raw buffers. Useful for tracing DPBP content. */
+TRACE_EVENT(dpaa2_eth_buf_seed,
+	    /* Trace function prototype */
+	    TP_PROTO(struct net_device *netdev,
+		     /* virtual address and size */
+		     void *vaddr,
+		     size_t size,
+		     /* dma map address and size */
+		     dma_addr_t dma_addr,
+		     size_t map_size,
+		     /* buffer pool id, if relevant */
+		     u16 bpid),
+
+	    /* Repeat argument list here */
+	    TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+	    /* A structure containing the relevant information we want
+	     * to record. Declare name and type for each normal element,
+	     * name, type and size for arrays. Use __string for variable
+	     * length strings.
+	     */
+	    TP_STRUCT__entry(
+			     __field(void *, vaddr)
+			     __field(size_t, size)
+			     __field(dma_addr_t, dma_addr)
+			     __field(size_t, map_size)
+			     __field(u16, bpid)
+			     __string(name, netdev->name)
+	    ),
+
+	    /* The function that assigns values to the above declared
+	     * fields
+	     */
+	    TP_fast_assign(
+			   __entry->vaddr = vaddr;
+			   __entry->size = size;
+			   __entry->dma_addr = dma_addr;
+			   __entry->map_size = map_size;
+			   __entry->bpid = bpid;
+			   __assign_str(name, netdev->name);
+	    ),
+
+	    /* This is what gets printed when the trace event is
+	     * triggered.
+	     */
+	    TP_printk(TR_BUF_FMT,
+		      __get_str(name),
+		      __entry->vaddr,
+		      __entry->size,
+		      &__entry->dma_addr,
+		      __entry->map_size,
+		      __entry->bpid)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA2_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE	dpaa2-eth-trace
+#include <trace/define_trace.h>
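This header follows the usual self-instantiating tracepoint layout: exactly one translation unit defines CREATE_TRACE_POINTS before including it (dpaa2-eth.c does so in the next diff), any other user includes the header plainly, and call sites invoke the generated trace_dpaa2_tx_fd()/trace_dpaa2_rx_fd()/trace_dpaa2_tx_conf_fd() helpers, as visible in the Rx/Tx hunks below. With tracing enabled, the events show up under events/dpaa2_eth/ in tracefs, per the TRACE_SYSTEM name above.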
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
new file mode 100644
index 0000000..bf5add9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -0,0 +1,3659 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/mc.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/sock.h>
+
+#include "dpaa2-eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
+ * using these trace events only need to #include "dpaa2-eth-trace.h"
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa2-eth-trace.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+				dma_addr_t iova_addr)
+{
+	phys_addr_t phys_addr;
+
+	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+	return phys_to_virt(phys_addr);
+}
+
+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
+			     u32 fd_status,
+			     struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+
+	/* HW checksum validation is disabled, nothing to do here */
+	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* Read checksum validation bits */
+	if (!((fd_status & DPAA2_FAS_L3CV) &&
+	      (fd_status & DPAA2_FAS_L4CV)))
+		return;
+
+	/* Inform the stack there's no need to compute L3/L4 csum anymore */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void free_rx_fd(struct dpaa2_eth_priv *priv,
+		       const struct dpaa2_fd *fd,
+		       void *vaddr)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	u8 fd_format = dpaa2_fd_get_format(fd);
+	struct dpaa2_sg_entry *sgt;
+	void *sg_vaddr;
+	int i;
+
+	/* If single buffer frame, just free the data buffer */
+	if (fd_format == dpaa2_fd_single)
+		goto free_buf;
+	else if (fd_format != dpaa2_fd_sg)
+		/* We don't support any other format */
+		return;
+
+	/* For S/G frames, we first need to free all SG entries
+	 * except the first one, which was taken care of already
+	 */
+	sgt = vaddr + dpaa2_fd_get_offset(fd);
+	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+		addr = dpaa2_sg_get_addr(&sgt[i]);
+		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+
+		free_pages((unsigned long)sg_vaddr, 0);
+		if (dpaa2_sg_is_final(&sgt[i]))
+			break;
+	}
+
+free_buf:
+	free_pages((unsigned long)vaddr, 0);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
+					const struct dpaa2_fd *fd,
+					void *fd_vaddr)
+{
+	struct sk_buff *skb = NULL;
+	u16 fd_offset = dpaa2_fd_get_offset(fd);
+	u32 fd_length = dpaa2_fd_get_len(fd);
+
+	ch->buf_count--;
+
+	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, fd_offset);
+	skb_put(skb, fd_length);
+
+	return skb;
+}
+
+/* Build a non-linear (fragmented) skb based on an S/G table */
+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
+				      struct dpaa2_eth_channel *ch,
+				      struct dpaa2_sg_entry *sgt)
+{
+	struct sk_buff *skb = NULL;
+	struct device *dev = priv->net_dev->dev.parent;
+	void *sg_vaddr;
+	dma_addr_t sg_addr;
+	u16 sg_offset;
+	u32 sg_length;
+	struct page *page, *head_page;
+	int page_offset;
+	int i;
+
+	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+		struct dpaa2_sg_entry *sge = &sgt[i];
+
+		/* NOTE: We only support SG entries in dpaa2_sg_single format,
+		 * but this is the only format we may receive from HW anyway
+		 */
+
+		/* Get the address and length from the S/G entry */
+		sg_addr = dpaa2_sg_get_addr(sge);
+		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
+		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+
+		sg_length = dpaa2_sg_get_len(sge);
+
+		if (i == 0) {
+			/* We build the skb around the first data buffer */
+			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
+			if (unlikely(!skb)) {
+				/* Free the first SG entry now, since we already
+				 * unmapped it and obtained the virtual address
+				 */
+				free_pages((unsigned long)sg_vaddr, 0);
+
+				/* We still need to subtract the buffers used
+				 * by this FD from our software counter
+				 */
+				while (!dpaa2_sg_is_final(&sgt[i]) &&
+				       i < DPAA2_ETH_MAX_SG_ENTRIES)
+					i++;
+				break;
+			}
+
+			sg_offset = dpaa2_sg_get_offset(sge);
+			skb_reserve(skb, sg_offset);
+			skb_put(skb, sg_length);
+		} else {
+			/* Rest of the data buffers are stored as skb frags */
+			page = virt_to_page(sg_vaddr);
+			head_page = virt_to_head_page(sg_vaddr);
+
+			/* Offset in page (which may be compound).
+			 * Data in subsequent SG entries is stored from the
+			 * beginning of the buffer, so we don't need to add the
+			 * sg_offset.
+			 */
+			page_offset = ((unsigned long)sg_vaddr &
+				(PAGE_SIZE - 1)) +
+				(page_address(page) - page_address(head_page));
+
+			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+					sg_length, DPAA2_ETH_RX_BUF_SIZE);
+		}
+
+		if (dpaa2_sg_is_final(sge))
+			break;
+	}
+
+	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
+	/* Count all data buffers + SG table buffer */
+	ch->buf_count -= i + 2;
+
+	return skb;
+}
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	void *vaddr;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vaddr, 0);
+	}
+}
+
+static void xdp_release_buf(struct dpaa2_eth_priv *priv,
+			    struct dpaa2_eth_channel *ch,
+			    dma_addr_t addr)
+{
+	int err;
+
+	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+		return;
+
+	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+					       ch->xdp.drop_bufs,
+					       ch->xdp.drop_cnt)) == -EBUSY)
+		cpu_relax();
+
+	if (err) {
+		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+		ch->buf_count -= ch->xdp.drop_cnt;
+	}
+
+	ch->xdp.drop_cnt = 0;
+}
+
+static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
+		       void *buf_start, u16 queue_id)
+{
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_faead *faead;
+	u32 ctrl, frc;
+	int i, err;
+
+	/* Mark the egress frame hardware annotation area as valid */
+	frc = dpaa2_fd_get_frc(fd);
+	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+	/* Instruct hardware to release the FD buffer directly into
+	 * the buffer pool once transmission is completed, instead of
+	 * sending a Tx confirmation frame to us
+	 */
+	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+	faead = dpaa2_get_faead(buf_start, false);
+	faead->ctrl = cpu_to_le32(ctrl);
+	faead->conf_fqid = 0;
+
+	fq = &priv->fq[queue_id];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = priv->enqueue(priv, fq, fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+
+	return err;
+}
+
+static u32 run_xdp(struct dpaa2_eth_priv *priv,
+		   struct dpaa2_eth_channel *ch,
+		   struct dpaa2_eth_fq *rx_fq,
+		   struct dpaa2_fd *fd, void *vaddr)
+{
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	struct rtnl_link_stats64 *percpu_stats;
+	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
+	u32 xdp_act = XDP_PASS;
+	int err;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+	rcu_read_lock();
+
+	xdp_prog = READ_ONCE(ch->xdp.prog);
+	if (!xdp_prog)
+		goto out;
+
+	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+	xdp_set_data_meta_invalid(&xdp);
+	xdp.rxq = &ch->xdp_rxq;
+
+	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+	/* xdp.data pointer may have changed */
+	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+	switch (xdp_act) {
+	case XDP_PASS:
+		break;
+	case XDP_TX:
+		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
+		if (err) {
+			xdp_release_buf(priv, ch, addr);
+			percpu_stats->tx_errors++;
+			ch->stats.xdp_tx_err++;
+		} else {
+			percpu_stats->tx_packets++;
+			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+			ch->stats.xdp_tx++;
+		}
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(xdp_act);
+		/* fall through */
+	case XDP_ABORTED:
+		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+		/* fall through */
+	case XDP_DROP:
+		xdp_release_buf(priv, ch, addr);
+		ch->stats.xdp_drop++;
+		break;
+	case XDP_REDIRECT:
+		dma_unmap_page(priv->net_dev->dev.parent, addr,
+			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+		ch->buf_count--;
+		xdp.data_hard_start = vaddr;
+		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+		if (unlikely(err))
+			ch->stats.xdp_drop++;
+		else
+			ch->stats.xdp_redirect++;
+		break;
+	}
+
+	ch->xdp.res |= xdp_act;
+out:
+	rcu_read_unlock();
+	return xdp_act;
+}
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_channel *ch,
+			 const struct dpaa2_fd *fd,
+			 struct dpaa2_eth_fq *fq)
+{
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	u8 fd_format = dpaa2_fd_get_format(fd);
+	void *vaddr;
+	struct sk_buff *skb;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_fas *fas;
+	void *buf_data;
+	u32 status = 0;
+	u32 xdp_act;
+
+	/* Tracing point */
+	trace_dpaa2_rx_fd(priv->net_dev, fd);
+
+	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				DMA_BIDIRECTIONAL);
+
+	fas = dpaa2_get_fas(vaddr, false);
+	prefetch(fas);
+	buf_data = vaddr + dpaa2_fd_get_offset(fd);
+	prefetch(buf_data);
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+	if (fd_format == dpaa2_fd_single) {
+		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+		if (xdp_act != XDP_PASS) {
+			percpu_stats->rx_packets++;
+			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+			return;
+		}
+
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		skb = build_linear_skb(ch, fd, vaddr);
+	} else if (fd_format == dpaa2_fd_sg) {
+		WARN_ON(priv->xdp_prog);
+
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		skb = build_frag_skb(priv, ch, buf_data);
+		free_pages((unsigned long)vaddr, 0);
+		percpu_extras->rx_sg_frames++;
+		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
+	} else {
+		/* We don't support any other format */
+		goto err_frame_format;
+	}
+
+	if (unlikely(!skb))
+		goto err_build_skb;
+
+	prefetch(skb->data);
+
+	/* Get the timestamp value */
+	if (priv->rx_tstamp) {
+		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+		__le64 *ts = dpaa2_get_ts(vaddr, false);
+		u64 ns;
+
+		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+		shhwtstamps->hwtstamp = ns_to_ktime(ns);
+	}
+
+	/* Check if we need to validate the L4 csum */
+	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+		status = le32_to_cpu(fas->status);
+		validate_rx_csum(priv, status, skb);
+	}
+
+	skb->protocol = eth_type_trans(skb, priv->net_dev);
+	skb_record_rx_queue(skb, fq->flowid);
+
+	percpu_stats->rx_packets++;
+	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+
+	list_add_tail(&skb->list, ch->rx_list);
+
+	return;
+
+err_build_skb:
+	free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+	percpu_stats->rx_dropped++;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int consume_frames(struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq **src)
+{
+	struct dpaa2_eth_priv *priv = ch->priv;
+	struct dpaa2_eth_fq *fq = NULL;
+	struct dpaa2_dq *dq;
+	const struct dpaa2_fd *fd;
+	int cleaned = 0;
+	int is_last;
+
+	do {
+		dq = dpaa2_io_store_next(ch->store, &is_last);
+		if (unlikely(!dq)) {
+			/* If we're here, we *must* have placed a
+			 * volatile dequeue command, so keep reading through
+			 * the store until we get some sort of valid response
+			 * token (either a valid frame or an "empty dequeue")
+			 */
+			continue;
+		}
+
+		fd = dpaa2_dq_fd(dq);
+		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+
+		fq->consume(priv, ch, fd, fq);
+		cleaned++;
+	} while (!is_last);
+
+	if (!cleaned)
+		return 0;
+
+	fq->stats.frames += cleaned;
+
+	/* A dequeue operation only pulls frames from a single queue
+	 * into the store. Return the frame queue as an out param.
+	 */
+	if (src)
+		*src = fq;
+
+	return cleaned;
+}
+
+/* Configure the egress frame annotation for timestamp update */
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+	struct dpaa2_faead *faead;
+	u32 ctrl, frc;
+
+	/* Mark the egress frame annotation area as valid */
+	frc = dpaa2_fd_get_frc(fd);
+	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+	/* Set hardware annotation size */
+	ctrl = dpaa2_fd_get_ctrl(fd);
+	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+	/* enable UPD (update prepended data) bit in FAEAD field of
+	 * hardware frame annotation area
+	 */
+	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+	faead = dpaa2_get_faead(buf_start, true);
+	faead->ctrl = cpu_to_le32(ctrl);
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int build_sg_fd(struct dpaa2_eth_priv *priv,
+		       struct sk_buff *skb,
+		       struct dpaa2_fd *fd)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	void *sgt_buf = NULL;
+	dma_addr_t addr;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	struct dpaa2_sg_entry *sgt;
+	int i, err;
+	int sgt_buf_size;
+	struct scatterlist *scl, *crt_scl;
+	int num_sg;
+	int num_dma_bufs;
+	struct dpaa2_eth_swa *swa;
+
+	/* Create and map scatterlist.
+	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+	 * to go beyond nr_frags+1.
+	 * Note: We don't support chained scatterlists
+	 */
+	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+		return -EINVAL;
+
+	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+	if (unlikely(!scl))
+		return -ENOMEM;
+
+	sg_init_table(scl, nr_frags + 1);
+	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+	if (unlikely(!num_dma_bufs)) {
+		err = -ENOMEM;
+		goto dma_map_sg_failed;
+	}
+
+	/* Prepare the HW SGT structure */
+	sgt_buf_size = priv->tx_data_offset +
+		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
+	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+	if (unlikely(!sgt_buf)) {
+		err = -ENOMEM;
+		goto sgt_buf_alloc_failed;
+	}
+	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+	memset(sgt_buf, 0, sgt_buf_size);
+
+	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+	/* Fill in the HW SGT structure.
+	 *
+	 * sgt_buf is zeroed out, so the following fields are implicit
+	 * in all sgt entries:
+	 *   - offset is 0
+	 *   - format is 'dpaa2_sg_single'
+	 */
+	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+	}
+	dpaa2_sg_set_final(&sgt[i - 1], true);
+
+	/* Store the skb backpointer in the SGT buffer.
+	 * Fit the scatterlist and the number of buffers alongside the
+	 * skb backpointer in the software annotation area. We'll need
+	 * all of them on Tx Conf.
+	 */
+	swa = (struct dpaa2_eth_swa *)sgt_buf;
+	swa->type = DPAA2_ETH_SWA_SG;
+	swa->sg.skb = skb;
+	swa->sg.scl = scl;
+	swa->sg.num_sg = num_sg;
+	swa->sg.sgt_size = sgt_buf_size;
+
+	/* Separately map the SGT buffer */
+	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, addr))) {
+		err = -ENOMEM;
+		goto dma_map_single_failed;
+	}
+	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+	dpaa2_fd_set_addr(fd, addr);
+	dpaa2_fd_set_len(fd, skb->len);
+	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+		enable_tx_tstamp(fd, sgt_buf);
+
+	return 0;
+
+dma_map_single_failed:
+	skb_free_frag(sgt_buf);
+sgt_buf_alloc_failed:
+	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+dma_map_sg_failed:
+	kfree(scl);
+	return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int build_single_fd(struct dpaa2_eth_priv *priv,
+			   struct sk_buff *skb,
+			   struct dpaa2_fd *fd)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	u8 *buffer_start, *aligned_start;
+	struct dpaa2_eth_swa *swa;
+	dma_addr_t addr;
+
+	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
+	/* If there's enough room to align the FD address, do it.
+	 * It will help hardware optimize accesses.
+	 */
+	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+				  DPAA2_ETH_TX_BUF_ALIGN);
+	if (aligned_start >= skb->head)
+		buffer_start = aligned_start;
+
+	/* Store a backpointer to the skb at the beginning of the buffer
+	 * (in the private data area) such that we can release it
+	 * on Tx confirm
+	 */
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+	swa->type = DPAA2_ETH_SWA_SINGLE;
+	swa->single.skb = skb;
+
+	addr = dma_map_single(dev, buffer_start,
+			      skb_tail_pointer(skb) - buffer_start,
+			      DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, addr)))
+		return -ENOMEM;
+
+	dpaa2_fd_set_addr(fd, addr);
+	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+	dpaa2_fd_set_len(fd, skb->len);
+	dpaa2_fd_set_format(fd, dpaa2_fd_single);
+	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+		enable_tx_tstamp(fd, buffer_start);
+
+	return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ */
+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
+		       const struct dpaa2_fd *fd, bool in_napi)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t fd_addr;
+	struct sk_buff *skb = NULL;
+	unsigned char *buffer_start;
+	struct dpaa2_eth_swa *swa;
+	u8 fd_format = dpaa2_fd_get_format(fd);
+	u32 fd_len = dpaa2_fd_get_len(fd);
+
+	fd_addr = dpaa2_fd_get_addr(fd);
+	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+
+	if (fd_format == dpaa2_fd_single) {
+		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+			skb = swa->single.skb;
+			/* Accessing the skb buffer is safe before dma unmap,
+			 * because we didn't map the actual skb shell.
+			 */
+			dma_unmap_single(dev, fd_addr,
+					 skb_tail_pointer(skb) - buffer_start,
+					 DMA_BIDIRECTIONAL);
+		} else {
+			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
+			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+					 DMA_BIDIRECTIONAL);
+		}
+	} else if (fd_format == dpaa2_fd_sg) {
+		skb = swa->sg.skb;
+
+		/* Unmap the scatterlist */
+		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+			     DMA_BIDIRECTIONAL);
+		kfree(swa->sg.scl);
+
+		/* Unmap the SGT buffer */
+		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+				 DMA_BIDIRECTIONAL);
+	} else {
+		netdev_dbg(priv->net_dev, "Invalid FD format\n");
+		return;
+	}
+
+	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
+		fq->dq_frames++;
+		fq->dq_bytes += fd_len;
+	}
+
+	if (swa->type == DPAA2_ETH_SWA_XDP) {
+		xdp_return_frame(swa->xdp.xdpf);
+		return;
+	}
+
+	/* Get the timestamp value */
+	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		struct skb_shared_hwtstamps shhwtstamps;
+		__le64 *ts = dpaa2_get_ts(buffer_start, true);
+		u64 ns;
+
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+		shhwtstamps.hwtstamp = ns_to_ktime(ns);
+		skb_tstamp_tx(skb, &shhwtstamps);
+	}
+
+	/* Free SGT buffer allocated on tx */
+	if (fd_format != dpaa2_fd_single)
+		skb_free_frag(buffer_start);
+
+	/* Move on with skb release */
+	napi_consume_skb(skb, in_napi);
+}
+
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpaa2_fd fd;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	struct dpaa2_eth_fq *fq;
+	struct netdev_queue *nq;
+	u16 queue_mapping;
+	unsigned int needed_headroom;
+	u32 fd_len;
+	u8 prio = 0;
+	int err, i;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+	if (skb_headroom(skb) < needed_headroom) {
+		struct sk_buff *ns;
+
+		ns = skb_realloc_headroom(skb, needed_headroom);
+		if (unlikely(!ns)) {
+			percpu_stats->tx_dropped++;
+			goto err_alloc_headroom;
+		}
+		percpu_extras->tx_reallocs++;
+
+		if (skb->sk)
+			skb_set_owner_w(ns, skb->sk);
+
+		dev_kfree_skb(skb);
+		skb = ns;
+	}
+
+	/* We'll be holding a back-reference to the skb until Tx Confirmation;
+	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
+	 */
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (unlikely(!skb)) {
+		/* skb_unshare() has already freed the skb */
+		percpu_stats->tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* Setup the FD fields */
+	memset(&fd, 0, sizeof(fd));
+
+	if (skb_is_nonlinear(skb)) {
+		err = build_sg_fd(priv, skb, &fd);
+		percpu_extras->tx_sg_frames++;
+		percpu_extras->tx_sg_bytes += skb->len;
+	} else {
+		err = build_single_fd(priv, skb, &fd);
+	}
+
+	if (unlikely(err)) {
+		percpu_stats->tx_dropped++;
+		goto err_build_fd;
+	}
+
+	/* Tracing point */
+	trace_dpaa2_tx_fd(net_dev, &fd);
+
+	/* TxConf FQ selection relies on queue id from the stack.
+	 * In case of a forwarded frame from another DPNI interface, we choose
+	 * a queue affined to the same core that processed the Rx frame
+	 */
+	queue_mapping = skb_get_queue_mapping(skb);
+
+	if (net_dev->num_tc) {
+		prio = netdev_txq_to_tc(net_dev, queue_mapping);
+		/* Hardware interprets priority level 0 as being the highest,
+		 * so we need to do a reverse mapping to the netdev tc index
+		 */
+		prio = net_dev->num_tc - prio - 1;
+		/* We have only one FQ array entry for all Tx hardware queues
+		 * with the same flow id (but different priority levels)
+		 */
+		queue_mapping %= dpaa2_eth_queue_count(priv);
+	}
+	fq = &priv->fq[queue_mapping];
+
+	fd_len = dpaa2_fd_get_len(&fd);
+	nq = netdev_get_tx_queue(net_dev, queue_mapping);
+	netdev_tx_sent_queue(nq, fd_len);
+
+	/* Everything that happens after this enqueues might race with
+	 * the Tx confirmation callback for this frame
+	 */
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = priv->enqueue(priv, fq, &fd, prio);
+		if (err != -EBUSY)
+			break;
+	}
+	percpu_extras->tx_portal_busy += i;
+	if (unlikely(err < 0)) {
+		percpu_stats->tx_errors++;
+		/* Clean up everything, including freeing the skb */
+		free_tx_fd(priv, fq, &fd, false);
+		netdev_tx_completed_queue(nq, 1, fd_len);
+	} else {
+		percpu_stats->tx_packets++;
+		percpu_stats->tx_bytes += fd_len;
+	}
+
+	return NETDEV_TX_OK;
+
+err_build_fd:
+err_alloc_headroom:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+			      struct dpaa2_eth_channel *ch __always_unused,
+			      const struct dpaa2_fd *fd,
+			      struct dpaa2_eth_fq *fq)
+{
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	u32 fd_len = dpaa2_fd_get_len(fd);
+	u32 fd_errors;
+
+	/* Tracing point */
+	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+	percpu_extras->tx_conf_frames++;
+	percpu_extras->tx_conf_bytes += fd_len;
+
+	/* Check frame errors in the FD field */
+	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+	free_tx_fd(priv, fq, fd, true);
+
+	if (likely(!fd_errors))
+		return;
+
+	if (net_ratelimit())
+		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+			   fd_errors);
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	/* Tx-conf logically pertains to the egress path. */
+	percpu_stats->tx_errors++;
+}
+
+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+	int err;
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_RX_L3_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev,
+			   "dpni_set_offload(RX_L3_CSUM) failed\n");
+		return err;
+	}
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_RX_L4_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev,
+			   "dpni_set_offload(RX_L4_CSUM) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+	int err;
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_TX_L3_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+		return err;
+	}
+
+	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+			       DPNI_OFF_TX_L4_CSUM, enable);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int add_bufs(struct dpaa2_eth_priv *priv,
+		    struct dpaa2_eth_channel *ch, u16 bpid)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	struct page *page;
+	dma_addr_t addr;
+	int i, err;
+
+	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+		/* Allocate one page for each Rx buffer. WRIOP sees the
+		 * entire page except for a tailroom reserved for the skb
+		 * shared info and its alignment padding.
+		 */
+		page = dev_alloc_pages(0);
+		if (!page)
+			goto err_alloc;
+
+		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+				    DMA_BIDIRECTIONAL);
+		if (unlikely(dma_mapping_error(dev, addr)))
+			goto err_map;
+
+		buf_array[i] = addr;
+
+		/* tracing point */
+		trace_dpaa2_eth_buf_seed(priv->net_dev,
+					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+					 addr, DPAA2_ETH_RX_BUF_SIZE,
+					 bpid);
+	}
+
+release_bufs:
+	/* In case the portal is busy, retry until successful */
+	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+					       buf_array, i)) == -EBUSY)
+		cpu_relax();
+
+	/* If release command failed, clean up and bail out;
+	 * not much else we can do about it
+	 */
+	if (err) {
+		free_bufs(priv, buf_array, i);
+		return 0;
+	}
+
+	return i;
+
+err_map:
+	__free_pages(page, 0);
+err_alloc:
+	/* If we managed to allocate at least some buffers,
+	 * release them to hardware
+	 */
+	if (i)
+		goto release_bufs;
+
+	return 0;
+}
+
+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+	int i, j;
+	int new_count;
+
+	for (j = 0; j < priv->num_channels; j++) {
+		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+		     i += DPAA2_ETH_BUFS_PER_CMD) {
+			new_count = add_bufs(priv, priv->channel[j], bpid);
+			priv->channel[j]->buf_count += new_count;
+
+			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+				return -ENOMEM;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
+	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	int ret;
+
+	do {
+		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
+					       buf_array, count);
+		if (ret < 0) {
+			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+			return;
+		}
+		free_bufs(priv, buf_array, ret);
+	} while (ret);
+}
+
+static void drain_pool(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+	drain_bufs(priv, 1);
+
+	for (i = 0; i < priv->num_channels; i++)
+		priv->channel[i]->buf_count = 0;
+}
+
+/* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+static int refill_pool(struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_channel *ch,
+		       u16 bpid)
+{
+	int new_count;
+
+	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+		return 0;
+
+	do {
+		new_count = add_bufs(priv, ch, bpid);
+		if (unlikely(!new_count)) {
+			/* Out of memory; abort for now, we'll try later on */
+			break;
+		}
+		ch->buf_count += new_count;
+	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int pull_channel(struct dpaa2_eth_channel *ch)
+{
+	int err;
+	int dequeues = -1;
+
+	/* Retry while portal is busy */
+	do {
+		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+						    ch->store);
+		dequeues++;
+		cpu_relax();
+	} while (err == -EBUSY);
+
+	ch->stats.dequeue_portal_busy += dequeues;
+	if (unlikely(err))
+		ch->stats.pull_err++;
+
+	return err;
+}
+
+/* NAPI poll routine
+ *
+ * Frames are dequeued from the QMan channel associated with this NAPI context.
+ * Rx, Tx confirmation and (if configured) Rx error frames all count
+ * towards the NAPI budget.
+ */
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+	struct dpaa2_eth_channel *ch;
+	struct dpaa2_eth_priv *priv;
+	int rx_cleaned = 0, txconf_cleaned = 0;
+	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+	struct netdev_queue *nq;
+	int store_cleaned, work_done;
+	struct list_head rx_list;
+	int err;
+
+	ch = container_of(napi, struct dpaa2_eth_channel, napi);
+	ch->xdp.res = 0;
+	priv = ch->priv;
+
+	INIT_LIST_HEAD(&rx_list);
+	ch->rx_list = &rx_list;
+
+	do {
+		err = pull_channel(ch);
+		if (unlikely(err))
+			break;
+
+		/* Refill pool if appropriate */
+		refill_pool(priv, ch, priv->bpid);
+
+		store_cleaned = consume_frames(ch, &fq);
+		if (!store_cleaned)
+			break;
+		if (fq->type == DPAA2_RX_FQ) {
+			rx_cleaned += store_cleaned;
+		} else {
+			txconf_cleaned += store_cleaned;
+			/* We have a single Tx conf FQ on this channel */
+			txc_fq = fq;
+		}
+
+		/* If we either consumed the whole NAPI budget with Rx frames
+		 * or we reached the Tx confirmations threshold, we're done.
+		 */
+		if (rx_cleaned >= budget ||
+		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+			work_done = budget;
+			goto out;
+		}
+	} while (store_cleaned);
+
+	/* We didn't consume the entire budget, so finish napi and
+	 * re-enable data availability notifications
+	 */
+	napi_complete_done(napi, rx_cleaned);
+	do {
+		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+		cpu_relax();
+	} while (err == -EBUSY);
+	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+		  ch->nctx.desired_cpu);
+
+	work_done = max(rx_cleaned, 1);
+
+out:
+	netif_receive_skb_list(ch->rx_list);
+
+	if (txc_fq && txc_fq->dq_frames) {
+		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+					  txc_fq->dq_bytes);
+		txc_fq->dq_frames = 0;
+		txc_fq->dq_bytes = 0;
+	}
+
+	if (ch->xdp.res & XDP_REDIRECT)
+		xdp_do_flush_map();
+
+	return work_done;
+}
+
+static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		napi_enable(&ch->napi);
+	}
+}
+
+static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		napi_disable(&ch->napi);
+	}
+}
+
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+{
+	struct dpni_taildrop td = {0};
+	int i, err;
+
+	if (priv->rx_td_enabled == enable)
+		return;
+
+	td.enable = enable;
+	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		if (priv->fq[i].type != DPAA2_RX_FQ)
+			continue;
+		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
+					priv->fq[i].flowid, &td);
+		if (err) {
+			netdev_err(priv->net_dev,
+				   "dpni_set_taildrop() failed\n");
+			break;
+		}
+	}
+
+	priv->rx_td_enabled = enable;
+}
+
+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
+
+static int link_state_update(struct dpaa2_eth_priv *priv)
+{
+	struct dpni_link_state state = {0};
+	bool tx_pause;
+	int err;
+
+	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+	if (unlikely(err)) {
+		netdev_err(priv->net_dev,
+			   "dpni_get_link_state() failed\n");
+		return err;
+	}
+
+	/* If Tx pause frame settings have changed, we need to update
+	 * Rx FQ taildrop configuration as well. We configure taildrop
+	 * only when pause frame generation is disabled.
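+	 * Tx pause is generated when exactly one of the PAUSE and ASYM_PAUSE
+	 * link options is set, hence the XOR below.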
+	 */
+	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
+		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+
+	/* Check link state; speed / duplex changes are not treated yet */
+	if (priv->link_state.up == state.up)
+		goto out;
+
+	if (state.up) {
+		update_tx_fqids(priv);
+		netif_carrier_on(priv->net_dev);
+		netif_tx_start_all_queues(priv->net_dev);
+	} else {
+		netif_tx_stop_all_queues(priv->net_dev);
+		netif_carrier_off(priv->net_dev);
+	}
+
+	netdev_info(priv->net_dev, "Link Event: state %s\n",
+		    state.up ? "up" : "down");
+
+out:
+	priv->link_state = state;
+
+	return 0;
+}
+
+static int dpaa2_eth_open(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int err;
+
+	err = seed_pool(priv, priv->bpid);
+	if (err) {
+		/* Not much to do; the buffer pool, though not filled up,
+		 * may still contain some buffers which would enable us
+		 * to limp on.
+		 */
+		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+			   priv->dpbp_dev->obj_desc.id, priv->bpid);
+	}
+
+	/* We'll only start the txqs when the link is actually ready; make sure
+	 * we don't race against the link up notification, which may come
+	 * immediately after dpni_enable();
+	 */
+	netif_tx_stop_all_queues(net_dev);
+	enable_ch_napi(priv);
+	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
+	 * return true and cause 'ip link show' to report the LOWER_UP flag,
+	 * even though the link notification wasn't even received.
+	 */
+	netif_carrier_off(net_dev);
+
+	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+	if (err < 0) {
+		netdev_err(net_dev, "dpni_enable() failed\n");
+		goto enable_err;
+	}
+
+	/* If the DPMAC object has already processed the link up interrupt,
+	 * we have to learn the link state ourselves.
+	 */
+	err = link_state_update(priv);
+	if (err < 0) {
+		netdev_err(net_dev, "Can't update link state\n");
+		goto link_state_err;
+	}
+
+	return 0;
+
+link_state_err:
+enable_err:
+	disable_ch_napi(priv);
+	drain_pool(priv);
+	return err;
+}
+
+/* Total number of in-flight frames on ingress queues */
+static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_fq *fq;
+	u32 fcnt = 0, bcnt = 0, total = 0;
+	int i, err;
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+		if (err) {
+			netdev_warn(priv->net_dev, "query_fq_count failed\n");
+			break;
+		}
+		total += fcnt;
+	}
+
+	return total;
+}
+
+static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+	int retries = 10;
+	u32 pending;
+
+	do {
+		pending = ingress_fq_count(priv);
+		if (pending)
+			msleep(100);
+	} while (pending && --retries);
+}
+
+#define DPNI_TX_PENDING_VER_MAJOR	7
+#define DPNI_TX_PENDING_VER_MINOR	13
+static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+	union dpni_statistics stats;
+	int retries = 10;
+	int err;
+
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
+				   DPNI_TX_PENDING_VER_MINOR) < 0)
+		goto out;
+
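+	/* Page 6 of the DPNI statistics reports the number of Tx frames
+	 * still pending in hardware
+	 */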
+	do {
+		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
+					  &stats);
+		if (err)
+			goto out;
+		if (stats.page_6.tx_pending_frames == 0)
+			return;
+	} while (--retries);
+
+out:
+	msleep(500);
+}
+
+static int dpaa2_eth_stop(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int dpni_enabled = 0;
+	int retries = 10;
+
+	netif_tx_stop_all_queues(net_dev);
+	netif_carrier_off(net_dev);
+
+	/* On dpni_disable(), the MC firmware will:
+	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
+	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
+	 * of all in flight Tx frames is finished (and corresponding Tx conf
+	 * frames are enqueued back to software)
+	 *
+	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
+	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
+	 * and Tx conf queues are consumed on NAPI poll.
+	 */
+	wait_for_egress_fq_empty(priv);
+
+	do {
+		dpni_disable(priv->mc_io, 0, priv->mc_token);
+		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+		if (dpni_enabled)
+			/* Allow the hardware some slack */
+			msleep(100);
+	} while (dpni_enabled && --retries);
+	if (!retries) {
+		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
+		/* Must go on and disable NAPI nonetheless, so we don't crash at
+		 * the next "ifconfig up"
+		 */
+	}
+
+	wait_for_ingress_fq_empty(priv);
+	disable_ch_napi(priv);
+
+	/* Empty the buffer pool */
+	drain_pool(priv);
+
+	return 0;
+}
+
+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+	int err;
+
+	err = eth_mac_addr(net_dev, addr);
+	if (err < 0) {
+		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+		return err;
+	}
+
+	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+					net_dev->dev_addr);
+	if (err) {
+		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Fill in counters maintained by the GPP driver. These may be different from
+ * the hardware counters obtained by ethtool.
+ */
+static void dpaa2_eth_get_stats(struct net_device *net_dev,
+				struct rtnl_link_stats64 *stats)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct rtnl_link_stats64 *percpu_stats;
+	u64 *cpustats;
+	u64 *netstats = (u64 *)stats;
+	int i, j;
+	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
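+	/* Sum the per-cpu counters field by field, treating both structures
+	 * as flat arrays of u64
+	 */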
+	for_each_possible_cpu(i) {
+		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+		cpustats = (u64 *)percpu_stats;
+		for (j = 0; j < num; j++)
+			netstats[j] += cpustats[j];
+	}
+}
+
+/* Copy mac unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void add_uc_hw_addr(const struct net_device *net_dev,
+			   struct dpaa2_eth_priv *priv)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netdev_for_each_uc_addr(ha, net_dev) {
+		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+					ha->addr);
+		if (err)
+			netdev_warn(priv->net_dev,
+				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
+				    ha->addr, err);
+	}
+}
+
+/* Copy mac multicast addresses from @net_dev to @priv
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void add_mc_hw_addr(const struct net_device *net_dev,
+			   struct dpaa2_eth_priv *priv)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netdev_for_each_mc_addr(ha, net_dev) {
+		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+					ha->addr);
+		if (err)
+			netdev_warn(priv->net_dev,
+				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
+				    ha->addr, err);
+	}
+}
+
+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int uc_count = netdev_uc_count(net_dev);
+	int mc_count = netdev_mc_count(net_dev);
+	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
+	u32 options = priv->dpni_attrs.options;
+	u16 mc_token = priv->mc_token;
+	struct fsl_mc_io *mc_io = priv->mc_io;
+	int err;
+
+	/* Basic sanity checks; these probably indicate a misconfiguration */
+	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
+		netdev_info(net_dev,
+			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
+			    max_mac);
+
+	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
+	if (uc_count > max_mac) {
+		netdev_info(net_dev,
+			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
+			    uc_count, max_mac);
+		goto force_promisc;
+	}
+	if (mc_count + uc_count > max_mac) {
+		netdev_info(net_dev,
+			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
+			    uc_count + mc_count, max_mac);
+		goto force_mc_promisc;
+	}
+
+	/* Adjust promisc settings due to flag combinations */
+	if (net_dev->flags & IFF_PROMISC)
+		goto force_promisc;
+	if (net_dev->flags & IFF_ALLMULTI) {
+		/* First, rebuild unicast filtering table. This should be done
+		 * in promisc mode, in order to avoid frame loss while we
+		 * progressively add entries to the table.
+		 * We don't know whether we had been in promisc already, and
+		 * making an MC call to find out is expensive; so set uc promisc
+		 * nonetheless.
+		 */
+		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+		if (err)
+			netdev_warn(net_dev, "Can't set uc promisc\n");
+
+		/* Actual uc table reconstruction. */
+		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
+		if (err)
+			netdev_warn(net_dev, "Can't clear uc filters\n");
+		add_uc_hw_addr(net_dev, priv);
+
+		/* Finally, clear uc promisc and set mc promisc as requested. */
+		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+		if (err)
+			netdev_warn(net_dev, "Can't clear uc promisc\n");
+		goto force_mc_promisc;
+	}
+
+	/* Neither unicast nor multicast promisc will be on... eventually.
+	 * For now, rebuild mac filtering tables while forcing both of them on.
+	 */
+	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
+	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
+	/* Actual mac filtering tables reconstruction */
+	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't clear mac filters\n");
+	add_mc_hw_addr(net_dev, priv);
+	add_uc_hw_addr(net_dev, priv);
+
+	/* Now we can clear both ucast and mcast promisc, without risking
+	 * to drop legitimate frames anymore.
+	 */
+	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+	if (err)
+		netdev_warn(net_dev, "Can't clear ucast promisc\n");
+	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
+	if (err)
+		netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
+	return;
+
+force_promisc:
+	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set ucast promisc\n");
+force_mc_promisc:
+	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+	if (err)
+		netdev_warn(net_dev, "Can't set mcast promisc\n");
+}
+
+static int dpaa2_eth_set_features(struct net_device *net_dev,
+				  netdev_features_t features)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	netdev_features_t changed = features ^ net_dev->features;
+	bool enable;
+	int err;
+
+	if (changed & NETIF_F_RXCSUM) {
+		enable = !!(features & NETIF_F_RXCSUM);
+		err = set_rx_csum(priv, enable);
+		if (err)
+			return err;
+	}
+
+	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+		err = set_tx_csum(priv, enable);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config config;
+
+	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		priv->tx_tstamp = false;
+		break;
+	case HWTSTAMP_TX_ON:
+		priv->tx_tstamp = true;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+		priv->rx_tstamp = false;
+	} else {
+		priv->rx_tstamp = true;
+		/* TS is set for all frame types, not only those requested */
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+	}
+
+	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+			-EFAULT : 0;
+}
+
+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	if (cmd == SIOCSHWTSTAMP)
+		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
+	return -EINVAL;
+}
+
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+	int mfl, linear_mfl;
+
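+	/* An XDP frame must fit in a single Rx buffer: subtract the hardware
+	 * annotation area, the driver headroom and the XDP headroom from the
+	 * buffer size to get the largest linear frame length we can accept
+	 */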
+	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
+
+	if (mfl > linear_mfl) {
+		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+			    linear_mfl - VLAN_ETH_HLEN);
+		return false;
+	}
+
+	return true;
+}
+
+static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+	int mfl, err;
+
+	/* We enforce a maximum Rx frame length based on MTU only if we have
+	 * an XDP program attached (in order to avoid Rx S/G frames).
+	 * Otherwise, we accept all incoming frames as long as they are not
+	 * larger than the maximum size supported in hardware.
+	 */
+	if (has_xdp)
+		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+	else
+		mfl = DPAA2_ETH_MFL;
+
+	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	int err;
+
+	if (!priv->xdp_prog)
+		goto out;
+
+	if (!xdp_mtu_valid(priv, new_mtu))
+		return -EINVAL;
+
+	err = set_rx_mfl(priv, new_mtu, true);
+	if (err)
+		return err;
+
+out:
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+	struct dpni_buffer_layout buf_layout = {0};
+	int err;
+
+	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_RX, &buf_layout);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+		return err;
+	}
+
+	/* Reserve extra headroom for XDP header size changes */
+	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_RX, &buf_layout);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct dpaa2_eth_channel *ch;
+	struct bpf_prog *old;
+	bool up, need_update;
+	int i, err;
+
+	if (prog && !xdp_mtu_valid(priv, dev->mtu))
+		return -EINVAL;
+
+	if (prog) {
+		prog = bpf_prog_add(prog, priv->num_channels);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+	}
+
+	up = netif_running(dev);
+	need_update = (!!priv->xdp_prog != !!prog);
+
+	if (up)
+		dpaa2_eth_stop(dev);
+
+	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
+	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
+	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
+	 * so we are sure no old format buffers will be used from now on.
+	 */
+	if (need_update) {
+		err = set_rx_mfl(priv, dev->mtu, !!prog);
+		if (err)
+			goto out_err;
+		err = update_rx_buffer_headroom(priv, !!prog);
+		if (err)
+			goto out_err;
+	}
+
+	old = xchg(&priv->xdp_prog, prog);
+	if (old)
+		bpf_prog_put(old);
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		old = xchg(&ch->xdp.prog, prog);
+		if (old)
+			bpf_prog_put(old);
+	}
+
+	if (up) {
+		err = dpaa2_eth_open(dev);
+		if (err)
+			return err;
+	}
+
+	return 0;
+
+out_err:
+	if (prog)
+		bpf_prog_sub(prog, priv->num_channels);
+	if (up)
+		dpaa2_eth_open(dev);
+
+	return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return setup_xdp(dev, xdp->prog);
+	case XDP_QUERY_PROG:
+		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
+				    struct xdp_frame *xdpf)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	unsigned int needed_headroom;
+	struct dpaa2_eth_swa *swa;
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_fd fd;
+	void *buffer_start, *aligned_start;
+	dma_addr_t addr;
+	int err, i;
+
+	/* We require a minimum headroom to be able to transmit the frame.
+	 * Otherwise return an error and let the original net_device handle it
+	 */
+	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+	if (xdpf->headroom < needed_headroom)
+		return -EINVAL;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+	/* Setup the FD fields */
+	memset(&fd, 0, sizeof(fd));
+
+	/* Align the FD address downwards, but only if the aligned address
+	 * still falls within the frame's headroom
+	 */
+	buffer_start = xdpf->data - needed_headroom;
+	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+				  DPAA2_ETH_TX_BUF_ALIGN);
+	if (aligned_start >= xdpf->data - xdpf->headroom)
+		buffer_start = aligned_start;
+
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+	/* Store Tx confirmation info in the software annotation area */
+	swa->type = DPAA2_ETH_SWA_XDP;
+	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
+	swa->xdp.xdpf = xdpf;
+
+	addr = dma_map_single(dev, buffer_start,
+			      swa->xdp.dma_size,
+			      DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, addr))) {
+		percpu_stats->tx_dropped++;
+		return -ENOMEM;
+	}
+
+	dpaa2_fd_set_addr(&fd, addr);
+	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
+	dpaa2_fd_set_len(&fd, xdpf->len);
+	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
+	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
+
+	fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = priv->enqueue(priv, fq, &fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+	percpu_extras->tx_portal_busy += i;
+	if (unlikely(err < 0)) {
+		percpu_stats->tx_errors++;
+		/* let the Rx device handle the cleanup */
+		return err;
+	}
+
+	percpu_stats->tx_packets++;
+	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+
+	return 0;
+}
+
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+			      struct xdp_frame **frames, u32 flags)
+{
+	int drops = 0;
+	int i, err;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	if (!netif_running(net_dev))
+		return -ENETDOWN;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+
+		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
+		if (err) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	return n - drops;
+}
+
+static int update_xps(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct cpumask xps_mask;
+	struct dpaa2_eth_fq *fq;
+	int i, num_queues, netdev_queues;
+	int err = 0;
+
+	num_queues = dpaa2_eth_queue_count(priv);
+	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+	 * queues, so only process those
+	 */
+	for (i = 0; i < netdev_queues; i++) {
+		fq = &priv->fq[i % num_queues];
+
+		cpumask_clear(&xps_mask);
+		cpumask_set_cpu(fq->target_cpu, &xps_mask);
+
+		err = netif_set_xps_queue(net_dev, &xps_mask, i);
+		if (err) {
+			netdev_warn_once(net_dev, "Error setting XPS queue\n");
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+			      enum tc_setup_type type, void *type_data)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct tc_mqprio_qopt *mqprio = type_data;
+	u8 num_tc, num_queues;
+	int i;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EINVAL;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	num_queues = dpaa2_eth_queue_count(priv);
+	num_tc = mqprio->num_tc;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	if (num_tc > dpaa2_eth_tc_count(priv)) {
+		netdev_err(net_dev, "Max %d traffic classes supported\n",
+			   dpaa2_eth_tc_count(priv));
+		return -EINVAL;
+	}
+
+	if (!num_tc) {
+		netdev_reset_tc(net_dev);
+		netif_set_real_num_tx_queues(net_dev, num_queues);
+		goto out;
+	}
+
+	netdev_set_num_tc(net_dev, num_tc);
+	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+
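+	/* Each traffic class maps onto a contiguous block of num_queues
+	 * netdev queues
+	 */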
+	for (i = 0; i < num_tc; i++)
+		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+	update_xps(priv);
+
+	return 0;
+}
+
+static const struct net_device_ops dpaa2_eth_ops = {
+	.ndo_open = dpaa2_eth_open,
+	.ndo_start_xmit = dpaa2_eth_tx,
+	.ndo_stop = dpaa2_eth_stop,
+	.ndo_set_mac_address = dpaa2_eth_set_addr,
+	.ndo_get_stats64 = dpaa2_eth_get_stats,
+	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
+	.ndo_set_features = dpaa2_eth_set_features,
+	.ndo_do_ioctl = dpaa2_eth_ioctl,
+	.ndo_change_mtu = dpaa2_eth_change_mtu,
+	.ndo_bpf = dpaa2_eth_xdp,
+	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+	.ndo_setup_tc = dpaa2_eth_setup_tc,
+};
+
+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+	struct dpaa2_eth_channel *ch;
+
+	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
+
+	/* Update NAPI statistics */
+	ch->stats.cdan++;
+
+	napi_schedule_irqoff(&ch->napi);
+}
+
+/* Allocate and configure a DPCON object */
+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+{
+	struct fsl_mc_device *dpcon;
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpcon_attr attrs;
+	int err;
+
+	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
+				     FSL_MC_POOL_DPCON, &dpcon);
+	if (err) {
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+		return ERR_PTR(err);
+	}
+
+	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
+	if (err) {
+		dev_err(dev, "dpcon_open() failed\n");
+		goto free;
+	}
+
+	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
+	if (err) {
+		dev_err(dev, "dpcon_reset() failed\n");
+		goto close;
+	}
+
+	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
+	if (err) {
+		dev_err(dev, "dpcon_get_attributes() failed\n");
+		goto close;
+	}
+
+	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
+	if (err) {
+		dev_err(dev, "dpcon_enable() failed\n");
+		goto close;
+	}
+
+	return dpcon;
+
+close:
+	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+free:
+	fsl_mc_object_free(dpcon);
+
+	return NULL;
+}
+
+static void free_dpcon(struct dpaa2_eth_priv *priv,
+		       struct fsl_mc_device *dpcon)
+{
+	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
+	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+	fsl_mc_object_free(dpcon);
+}
+
+static struct dpaa2_eth_channel *
+alloc_channel(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_eth_channel *channel;
+	struct dpcon_attr attr;
+	struct device *dev = priv->net_dev->dev.parent;
+	int err;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return NULL;
+
+	channel->dpcon = setup_dpcon(priv);
+	if (IS_ERR_OR_NULL(channel->dpcon)) {
+		err = PTR_ERR_OR_ZERO(channel->dpcon);
+		goto err_setup;
+	}
+
+	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
+				   &attr);
+	if (err) {
+		dev_err(dev, "dpcon_get_attributes() failed\n");
+		goto err_get_attr;
+	}
+
+	channel->dpcon_id = attr.id;
+	channel->ch_id = attr.qbman_ch_id;
+	channel->priv = priv;
+
+	return channel;
+
+err_get_attr:
+	free_dpcon(priv, channel->dpcon);
+err_setup:
+	kfree(channel);
+	return ERR_PTR(err);
+}
+
+static void free_channel(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_channel *channel)
+{
+	free_dpcon(priv, channel->dpcon);
+	kfree(channel);
+}
+
+/* DPIO setup: allocate and configure QBMan channels, setup core affinity
+ * and register data availability notifications
+ */
+static int setup_dpio(struct dpaa2_eth_priv *priv)
+{
+	struct dpaa2_io_notification_ctx *nctx;
+	struct dpaa2_eth_channel *channel;
+	struct dpcon_notification_cfg dpcon_notif_cfg;
+	struct device *dev = priv->net_dev->dev.parent;
+	int i, err;
+
+	/* We want the ability to spread ingress traffic (RX, TX conf) to as
+	 * many cores as possible, so we need one channel for each core
+	 * (unless there are fewer queues than cores, in which case the extra
+	 * channels would be wasted).
+	 * Allocate one channel per core and register it to the core's
+	 * affine DPIO. If not enough channels are available for all cores
+	 * or if some cores don't have an affine DPIO, there will be no
+	 * ingress frame processing on those cores.
+	 */
+	cpumask_clear(&priv->dpio_cpumask);
+	for_each_online_cpu(i) {
+		/* Try to allocate a channel */
+		channel = alloc_channel(priv);
+		if (IS_ERR_OR_NULL(channel)) {
+			err = PTR_ERR_OR_ZERO(channel);
+			if (err != -EPROBE_DEFER)
+				dev_info(dev,
+					 "No affine channel for cpu %d and above\n", i);
+			goto err_alloc_ch;
+		}
+
+		priv->channel[priv->num_channels] = channel;
+
+		nctx = &channel->nctx;
+		nctx->is_cdan = 1;
+		nctx->cb = cdan_cb;
+		nctx->id = channel->ch_id;
+		nctx->desired_cpu = i;
+
+		/* Register the new context */
+		channel->dpio = dpaa2_io_service_select(i);
+		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
+		if (err) {
+			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
+			/* If no affine DPIO for this core, there's probably
+			 * none available for next cores either. Signal we want
+			 * to retry later, in case the DPIO devices weren't
+			 * probed yet.
+			 */
+			err = -EPROBE_DEFER;
+			goto err_service_reg;
+		}
+
+		/* Register DPCON notification with MC */
+		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
+		dpcon_notif_cfg.priority = 0;
+		dpcon_notif_cfg.user_ctx = nctx->qman64;
+		err = dpcon_set_notification(priv->mc_io, 0,
+					     channel->dpcon->mc_handle,
+					     &dpcon_notif_cfg);
+		if (err) {
+			dev_err(dev, "dpcon_set_notification() failed\n");
+			goto err_set_cdan;
+		}
+
+		/* If we managed to allocate a channel and also found an affine
+		 * DPIO for this core, add it to the final mask
+		 */
+		cpumask_set_cpu(i, &priv->dpio_cpumask);
+		priv->num_channels++;
+
+		/* Stop if we already have enough channels to accommodate all
+		 * RX and TX conf queues
+		 */
+		if (priv->num_channels == priv->dpni_attrs.num_queues)
+			break;
+	}
+
+	return 0;
+
+err_set_cdan:
+	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+err_service_reg:
+	free_channel(priv, channel);
+err_alloc_ch:
+	if (err == -EPROBE_DEFER) {
+		for (i = 0; i < priv->num_channels; i++) {
+			channel = priv->channel[i];
+			nctx = &channel->nctx;
+			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+			free_channel(priv, channel);
+		}
+		priv->num_channels = 0;
+		return err;
+	}
+
+	if (cpumask_empty(&priv->dpio_cpumask)) {
+		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
+		return -ENODEV;
+	}
+
+	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
+		 cpumask_pr_args(&priv->dpio_cpumask));
+
+	return 0;
+}
+
+static void free_dpio(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_eth_channel *ch;
+	int i;
+
+	/* deregister CDAN notifications and free channels */
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
+		free_channel(priv, ch);
+	}
+}
+
+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
+						    int cpu)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++)
+		if (priv->channel[i]->nctx.desired_cpu == cpu)
+			return priv->channel[i];
+
+	/* We should never get here. Issue a warning and return
+	 * the first channel, because it's still better than nothing
+	 */
+	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
+	return priv->channel[0];
+}
+
+static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_eth_fq *fq;
+	int rx_cpu, txc_cpu;
+	int i;
+
+	/* For each FQ, pick one channel/CPU to deliver frames to.
+	 * This may well change at runtime, either through irqbalance or
+	 * through direct user intervention.
+	 */
+	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		switch (fq->type) {
+		case DPAA2_RX_FQ:
+			fq->target_cpu = rx_cpu;
+			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
+			if (rx_cpu >= nr_cpu_ids)
+				rx_cpu = cpumask_first(&priv->dpio_cpumask);
+			break;
+		case DPAA2_TX_CONF_FQ:
+			fq->target_cpu = txc_cpu;
+			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
+			if (txc_cpu >= nr_cpu_ids)
+				txc_cpu = cpumask_first(&priv->dpio_cpumask);
+			break;
+		default:
+			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
+		}
+		fq->channel = get_affine_channel(priv, fq->target_cpu);
+	}
+
+	update_xps(priv);
+}
+
+static void setup_fqs(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	/* We have one TxConf FQ per Tx flow.
+	 * The number of Tx and Rx queues is the same.
+	 * Tx queues come first in the fq array.
+	 */
+	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
+		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
+		priv->fq[priv->num_fqs++].flowid = (u16)i;
+	}
+
+	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+		priv->fq[priv->num_fqs++].flowid = (u16)i;
+	}
+
+	/* For each FQ, decide on which core to process incoming frames */
+	set_fq_affinity(priv);
+}
+
+/* Allocate and configure one buffer pool for each interface */
+static int setup_dpbp(struct dpaa2_eth_priv *priv)
+{
+	int err;
+	struct fsl_mc_device *dpbp_dev;
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpbp_attr dpbp_attrs;
+
+	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+				     &dpbp_dev);
+	if (err) {
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "DPBP device allocation failed\n");
+		return err;
+	}
+
+	priv->dpbp_dev = dpbp_dev;
+
+	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+			&dpbp_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpbp_open() failed\n");
+		goto err_open;
+	}
+
+	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpbp_reset() failed\n");
+		goto err_reset;
+	}
+
+	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpbp_enable() failed\n");
+		goto err_enable;
+	}
+
+	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
+				  &dpbp_attrs);
+	if (err) {
+		dev_err(dev, "dpbp_get_attributes() failed\n");
+		goto err_get_attr;
+	}
+	priv->bpid = dpbp_attrs.bpid;
+
+	return 0;
+
+err_get_attr:
+	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+	fsl_mc_object_free(dpbp_dev);
+
+	return err;
+}
+
+static void free_dpbp(struct dpaa2_eth_priv *priv)
+{
+	drain_pool(priv);
+	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+	fsl_mc_object_free(priv->dpbp_dev);
+}
+
+static int set_buffer_layout(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_buffer_layout buf_layout = {0};
+	u16 rx_buf_align;
+	int err;
+
+	/* We need to check for WRIOP version 1.0.0, but depending on the MC
+	 * version, this number is not always provided correctly on rev1.
+	 * We need to check for both alternatives in this situation.
+	 */
+	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+	else
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
+	/* tx buffer */
+	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+	buf_layout.pass_timestamp = true;
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX, &buf_layout);
+	if (err) {
+		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
+		return err;
+	}
+
+	/* tx-confirm buffer */
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
+	if (err) {
+		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
+		return err;
+	}
+
+	/* Now that we've set our tx buffer layout, retrieve the minimum
+	 * required tx data offset.
+	 */
+	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
+				      &priv->tx_data_offset);
+	if (err) {
+		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
+		return err;
+	}
+
+	if ((priv->tx_data_offset % 64) != 0)
+		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
+			 priv->tx_data_offset);
+
+	/* rx buffer */
+	buf_layout.pass_frame_status = true;
+	buf_layout.pass_parser_result = true;
+	buf_layout.data_align = rx_buf_align;
+	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
+	buf_layout.private_data_size = 0;
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_RX, &buf_layout);
+	if (err) {
+		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
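+/* DPNI API versions 7.9 and above support enqueueing Tx frames directly by
+ * frame queue ID; older firmware only supports enqueue through the queuing
+ * destination (QDID + queuing bin)
+ */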
+#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
+#define DPNI_ENQUEUE_FQID_VER_MINOR	9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd, u8 prio)
+{
+	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+					   priv->tx_qdid, prio,
+					   fq->tx_qdbin, fd);
+}
+
+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd, u8 prio)
+{
+	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
+					   fq->tx_fqid[prio], fd);
+}
+
+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+		priv->enqueue = dpaa2_eth_enqueue_qd;
+	else
+		priv->enqueue = dpaa2_eth_enqueue_fq;
+}
+
+static int set_pause(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_link_cfg link_cfg = {0};
+	int err;
+
+	/* Get the default link options so we don't override other flags */
+	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+	if (err) {
+		dev_err(dev, "dpni_get_link_cfg() failed\n");
+		return err;
+	}
+
+	/* By default, enable both Rx and Tx pause frames */
+	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+	if (err) {
+		dev_err(dev, "dpni_set_link_cfg() failed\n");
+		return err;
+	}
+
+	priv->link_state.options = link_cfg.options;
+
+	return 0;
+}
+
+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+	struct dpni_queue_id qid = {0};
+	struct dpaa2_eth_fq *fq;
+	struct dpni_queue queue;
+	int i, j, err;
+
+	/* We only use Tx FQIDs for FQID-based enqueue, so check
+	 * if DPNI version supports it before updating FQIDs
+	 */
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+		return;
+
+	for (i = 0; i < priv->num_fqs; i++) {
+		fq = &priv->fq[i];
+		if (fq->type != DPAA2_TX_CONF_FQ)
+			continue;
+		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+					     DPNI_QUEUE_TX, j, fq->flowid,
+					     &queue, &qid);
+			if (err)
+				goto out_err;
+
+			fq->tx_fqid[j] = qid.fqid;
+			if (fq->tx_fqid[j] == 0)
+				goto out_err;
+		}
+	}
+
+	priv->enqueue = dpaa2_eth_enqueue_fq;
+
+	return;
+
+out_err:
+	netdev_info(priv->net_dev,
+		    "Error reading Tx FQID, falling back to QDID-based enqueue\n");
+	priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
+/* Configure the DPNI object this interface is associated with */
+static int setup_dpni(struct fsl_mc_device *ls_dev)
+{
+	struct device *dev = &ls_dev->dev;
+	struct dpaa2_eth_priv *priv;
+	struct net_device *net_dev;
+	int err;
+
+	net_dev = dev_get_drvdata(dev);
+	priv = netdev_priv(net_dev);
+
+	/* get a handle for the DPNI object */
+	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
+	if (err) {
+		dev_err(dev, "dpni_open() failed\n");
+		return err;
+	}
+
+	/* Check if we can work with this DPNI object */
+	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
+				   &priv->dpni_ver_minor);
+	if (err) {
+		dev_err(dev, "dpni_get_api_version() failed\n");
+		goto close;
+	}
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
+		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
+			priv->dpni_ver_major, priv->dpni_ver_minor,
+			DPNI_VER_MAJOR, DPNI_VER_MINOR);
+		err = -ENOTSUPP;
+		goto close;
+	}
+
+	ls_dev->mc_io = priv->mc_io;
+	ls_dev->mc_handle = priv->mc_token;
+
+	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+	if (err) {
+		dev_err(dev, "dpni_reset() failed\n");
+		goto close;
+	}
+
+	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+				  &priv->dpni_attrs);
+	if (err) {
+		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+		goto close;
+	}
+
+	err = set_buffer_layout(priv);
+	if (err)
+		goto close;
+
+	set_enqueue_mode(priv);
+
+	/* Enable pause frame support */
+	if (dpaa2_eth_has_pause_support(priv)) {
+		err = set_pause(priv);
+		if (err)
+			goto close;
+	}
+
+	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
+				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
+	if (!priv->cls_rules) {
+		err = -ENOMEM;
+		goto close;
+	}
+
+	return 0;
+
+close:
+	dpni_close(priv->mc_io, 0, priv->mc_token);
+
+	return err;
+}
+
+static void free_dpni(struct dpaa2_eth_priv *priv)
+{
+	int err;
+
+	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+	if (err)
+		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
+			    err);
+
+	dpni_close(priv->mc_io, 0, priv->mc_token);
+}
+
+static int setup_rx_flow(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue queue;
+	struct dpni_queue_id qid;
+	int err;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue(RX) failed\n");
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	queue.destination.id = fq->channel->dpcon_id;
+	queue.destination.type = DPNI_DEST_DPCON;
+	queue.destination.priority = 1;
+	queue.user_context = (u64)(uintptr_t)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX, 0, fq->flowid,
+			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+			     &queue);
+	if (err) {
+		dev_err(dev, "dpni_set_queue(RX) failed\n");
+		return err;
+	}
+
+	/* xdp_rxq setup */
+	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
+			       fq->flowid);
+	if (err) {
+		dev_err(dev, "xdp_rxq_info_reg failed\n");
+		return err;
+	}
+
+	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
+					 MEM_TYPE_PAGE_ORDER0, NULL);
+	if (err) {
+		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int setup_tx_flow(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue queue;
+	struct dpni_queue_id qid;
+	int i, err;
+
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_TX, i, fq->flowid,
+				     &queue, &qid);
+		if (err) {
+			dev_err(dev, "dpni_get_queue(TX) failed\n");
+			return err;
+		}
+		fq->tx_fqid[i] = qid.fqid;
+	}
+
+	/* All Tx queues belonging to the same flowid have the same qdbin */
+	fq->tx_qdbin = qid.qdbin;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+			     &queue, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	queue.destination.id = fq->channel->dpcon_id;
+	queue.destination.type = DPNI_DEST_DPCON;
+	queue.destination.priority = 0;
+	queue.user_context = (u64)(uintptr_t)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+			     &queue);
+	if (err) {
+		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Supported header fields for Rx hash distribution key */
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
+	{
+		/* L2 header */
+		.rxnfc_field = RXH_L2DA,
+		.cls_prot = NET_PROT_ETH,
+		.cls_field = NH_FLD_ETH_DA,
+		.id = DPAA2_ETH_DIST_ETHDST,
+		.size = 6,
+	}, {
+		.cls_prot = NET_PROT_ETH,
+		.cls_field = NH_FLD_ETH_SA,
+		.id = DPAA2_ETH_DIST_ETHSRC,
+		.size = 6,
+	}, {
+		/* This is the last ethertype field parsed:
+		 * depending on frame format, it can be the MAC ethertype
+		 * or the VLAN etype.
+		 */
+		.cls_prot = NET_PROT_ETH,
+		.cls_field = NH_FLD_ETH_TYPE,
+		.id = DPAA2_ETH_DIST_ETHTYPE,
+		.size = 2,
+	}, {
+		/* VLAN header */
+		.rxnfc_field = RXH_VLAN,
+		.cls_prot = NET_PROT_VLAN,
+		.cls_field = NH_FLD_VLAN_TCI,
+		.id = DPAA2_ETH_DIST_VLAN,
+		.size = 2,
+	}, {
+		/* IP header */
+		.rxnfc_field = RXH_IP_SRC,
+		.cls_prot = NET_PROT_IP,
+		.cls_field = NH_FLD_IP_SRC,
+		.id = DPAA2_ETH_DIST_IPSRC,
+		.size = 4,
+	}, {
+		.rxnfc_field = RXH_IP_DST,
+		.cls_prot = NET_PROT_IP,
+		.cls_field = NH_FLD_IP_DST,
+		.id = DPAA2_ETH_DIST_IPDST,
+		.size = 4,
+	}, {
+		.rxnfc_field = RXH_L3_PROTO,
+		.cls_prot = NET_PROT_IP,
+		.cls_field = NH_FLD_IP_PROTO,
+		.id = DPAA2_ETH_DIST_IPPROTO,
+		.size = 1,
+	}, {
+		/* Using UDP ports, this is functionally equivalent to raw
+		 * byte pairs from L4 header.
+		 */
+		.rxnfc_field = RXH_L4_B_0_1,
+		.cls_prot = NET_PROT_UDP,
+		.cls_field = NH_FLD_UDP_PORT_SRC,
+		.id = DPAA2_ETH_DIST_L4SRC,
+		.size = 2,
+	}, {
+		.rxnfc_field = RXH_L4_B_2_3,
+		.cls_prot = NET_PROT_UDP,
+		.cls_field = NH_FLD_UDP_PORT_DST,
+		.id = DPAA2_ETH_DIST_L4DST,
+		.size = 2,
+	},
+};
+
+/* Configure the Rx hash key using the legacy API */
+static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_rx_tc_dist_cfg dist_cfg;
+	int err;
+
+	memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+	dist_cfg.key_cfg_iova = key;
+	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+	if (err)
+		dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+
+	return err;
+}
+
+/* Configure the Rx hash key using the new API */
+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_rx_dist_cfg dist_cfg;
+	int err;
+
+	memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+	dist_cfg.key_cfg_iova = key;
+	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+	dist_cfg.enable = 1;
+
+	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+	if (err)
+		dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+
+	return err;
+}
+
+/* Configure the Rx flow classification key */
+static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_rx_dist_cfg dist_cfg;
+	int err;
+
+	memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+	dist_cfg.key_cfg_iova = key;
+	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+	dist_cfg.enable = 1;
+
+	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+	if (err)
+		dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+
+	return err;
+}
+
+/* Size of the Rx flow classification key */
+int dpaa2_eth_cls_key_size(u64 fields)
+{
+	int i, size = 0;
+
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+		if (!(fields & dist_fields[i].id))
+			continue;
+		size += dist_fields[i].size;
+	}
+
+	return size;
+}
+
+/* Offset of header field in Rx classification key */
+int dpaa2_eth_cls_fld_off(int prot, int field)
+{
+	int i, off = 0;
+
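+	/* The key is assumed to contain all supported fields, laid out in
+	 * dist_fields order
+	 */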
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+		if (dist_fields[i].cls_prot == prot &&
+		    dist_fields[i].cls_field == field)
+			return off;
+		off += dist_fields[i].size;
+	}
+
+	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
+	return 0;
+}
+
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+	int off = 0, new_off = 0;
+	int i, size;
+
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+		size = dist_fields[i].size;
+		if (dist_fields[i].id & fields) {
+			memcpy(key_mem + new_off, key_mem + off, size);
+			new_off += size;
+		}
+		off += size;
+	}
+}
+
+/* Set Rx distribution (hash or flow classification) key
+ * flags is a combination of RXH_ bits
+ */
+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+				  enum dpaa2_eth_rx_dist type, u64 flags)
+{
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpkg_profile_cfg cls_cfg;
+	u32 rx_hash_fields = 0;
+	dma_addr_t key_iova;
+	u8 *dma_mem;
+	int i;
+	int err = 0;
+
+	memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+		struct dpkg_extract *key =
+			&cls_cfg.extracts[cls_cfg.num_extracts];
+
+		/* For both Rx hashing and classification keys
+		 * we set only the selected fields.
+		 */
+		if (!(flags & dist_fields[i].id))
+			continue;
+		if (type == DPAA2_ETH_RX_DIST_HASH)
+			rx_hash_fields |= dist_fields[i].rxnfc_field;
+
+		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+			dev_err(dev, "error adding key extraction rule, too many rules?\n");
+			return -E2BIG;
+		}
+
+		key->type = DPKG_EXTRACT_FROM_HDR;
+		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
+		key->extract.from_hdr.type = DPKG_FULL_FIELD;
+		key->extract.from_hdr.field = dist_fields[i].cls_field;
+		cls_cfg.num_extracts++;
+	}
+
+	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+	if (!dma_mem)
+		return -ENOMEM;
+
+	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+	if (err) {
+		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
+		goto free_key;
+	}
+
+	/* Prepare for setting the rx dist */
+	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, key_iova)) {
+		dev_err(dev, "DMA mapping failed\n");
+		err = -ENOMEM;
+		goto free_key;
+	}
+
+	if (type == DPAA2_ETH_RX_DIST_HASH) {
+		if (dpaa2_eth_has_legacy_dist(priv))
+			err = config_legacy_hash_key(priv, key_iova);
+		else
+			err = config_hash_key(priv, key_iova);
+	} else {
+		err = config_cls_key(priv, key_iova);
+	}
+
+	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+			 DMA_TO_DEVICE);
+	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
+		priv->rx_hash_fields = rx_hash_fields;
+
+free_key:
+	kfree(dma_mem);
+	return err;
+}
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u64 key = 0;
+	int i;
+
+	if (!dpaa2_eth_hash_enabled(priv))
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+		if (dist_fields[i].rxnfc_field & flags)
+			key |= dist_fields[i].id;
+
+	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	int err;
+
+	/* Check if we actually support Rx flow classification */
+	if (dpaa2_eth_has_legacy_dist(priv)) {
+		dev_dbg(dev, "Rx cls not supported by current MC version\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!dpaa2_eth_fs_enabled(priv)) {
+		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!dpaa2_eth_hash_enabled(priv)) {
+		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* If there is no support for masking in the classification table,
+	 * we don't set a default key, as it will depend on the rules
+	 * added by the user at runtime.
+	 */
+	if (!dpaa2_eth_fs_mask_enabled(priv))
+		goto out;
+
+	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+	if (err)
+		return err;
+
+out:
+	priv->rx_cls_enabled = 1;
+
+	return 0;
+}
+
+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+ * frame queues and channels
+ */
+static int bind_dpni(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct device *dev = net_dev->dev.parent;
+	struct dpni_pools_cfg pools_params;
+	struct dpni_error_cfg err_cfg;
+	int err = 0;
+	int i;
+
+	pools_params.num_dpbp = 1;
+	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+	pools_params.pools[0].backup_pool = 0;
+	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+	if (err) {
+		dev_err(dev, "dpni_set_pools() failed\n");
+		return err;
+	}
+
+	/* have the interface implicitly distribute traffic based on
+	 * the default hash key
+	 */
+	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
+	if (err && err != -EOPNOTSUPP)
+		dev_err(dev, "Failed to configure hashing\n");
+
+	/* Configure the flow classification key; it includes all
+	 * supported header fields and cannot be modified at runtime
+	 */
+	err = dpaa2_eth_set_default_cls(priv);
+	if (err && err != -EOPNOTSUPP)
+		dev_err(dev, "Failed to configure Rx classification key\n");
+
+	/* Configure handling of error frames */
+	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+	err_cfg.set_frame_annotation = 1;
+	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
+				       &err_cfg);
+	if (err) {
+		dev_err(dev, "dpni_set_errors_behavior failed\n");
+		return err;
+	}
+
+	/* Configure Rx and Tx conf queues to generate CDANs */
+	for (i = 0; i < priv->num_fqs; i++) {
+		switch (priv->fq[i].type) {
+		case DPAA2_RX_FQ:
+			err = setup_rx_flow(priv, &priv->fq[i]);
+			break;
+		case DPAA2_TX_CONF_FQ:
+			err = setup_tx_flow(priv, &priv->fq[i]);
+			break;
+		default:
+			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
+			return -EINVAL;
+		}
+		if (err)
+			return err;
+	}
+
+	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+			    DPNI_QUEUE_TX, &priv->tx_qdid);
+	if (err) {
+		dev_err(dev, "dpni_get_qdid() failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Allocate rings for storing incoming frame descriptors */
+static int alloc_rings(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct device *dev = net_dev->dev.parent;
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		priv->channel[i]->store =
+			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
+		if (!priv->channel[i]->store) {
+			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
+			goto err_ring;
+		}
+	}
+
+	return 0;
+
+err_ring:
+	for (i = 0; i < priv->num_channels; i++) {
+		if (!priv->channel[i]->store)
+			break;
+		dpaa2_io_store_destroy(priv->channel[i]->store);
+	}
+
+	return -ENOMEM;
+}
+
+static void free_rings(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_channels; i++)
+		dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
+static int set_mac_addr(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct device *dev = net_dev->dev.parent;
+	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
+	int err;
+
+	/* Get firmware address, if any */
+	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
+	if (err) {
+		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
+		return err;
+	}
+
+	/* Get DPNI attributes address, if any */
+	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+					dpni_mac_addr);
+	if (err) {
+		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
+		return err;
+	}
+
+	/* First check if firmware has any address configured by bootloader */
+	if (!is_zero_ether_addr(mac_addr)) {
+		/* If the DPMAC addr != DPNI addr, update it */
+		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
+			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
+							priv->mc_token,
+							mac_addr);
+			if (err) {
+				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+				return err;
+			}
+		}
+		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+	} else if (is_zero_ether_addr(dpni_mac_addr)) {
+		/* No MAC address configured, fill in net_dev->dev_addr
+		 * with a random one
+		 */
+		eth_hw_addr_random(net_dev);
+		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+						net_dev->dev_addr);
+		if (err) {
+			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+			return err;
+		}
+
+		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+		 * practical purposes, this will be our "permanent" mac address,
+		 * at least until the next reboot. This move will also permit
+		 * register_netdevice() to properly fill up net_dev->perm_addr.
+		 */
+		net_dev->addr_assign_type = NET_ADDR_PERM;
+	} else {
+		/* NET_ADDR_PERM is default, all we have to do is
+		 * fill in the device addr.
+		 */
+		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+	}
+
+	return 0;
+}
+
+static int netdev_init(struct net_device *net_dev)
+{
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u32 options = priv->dpni_attrs.options;
+	u64 supported = 0, not_supported = 0;
+	u8 bcast_addr[ETH_ALEN];
+	u8 num_queues;
+	int err;
+
+	net_dev->netdev_ops = &dpaa2_eth_ops;
+	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+
+	err = set_mac_addr(priv);
+	if (err)
+		return err;
+
+	/* Explicitly add the broadcast address to the MAC filtering table */
+	eth_broadcast_addr(bcast_addr);
+	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
+	if (err) {
+		dev_err(dev, "dpni_add_mac_addr() failed\n");
+		return err;
+	}
+
+	/* Set MTU upper limit; lower limit is 68B (default value) */
+	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+					DPAA2_ETH_MFL);
+	if (err) {
+		dev_err(dev, "dpni_set_max_frame_length() failed\n");
+		return err;
+	}
+
+	/* Set actual number of queues in the net device */
+	num_queues = dpaa2_eth_queue_count(priv);
+	err = netif_set_real_num_tx_queues(net_dev, num_queues);
+	if (err) {
+		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
+		return err;
+	}
+	err = netif_set_real_num_rx_queues(net_dev, num_queues);
+	if (err) {
+		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
+		return err;
+	}
+
+	/* Capabilities listing */
+	supported |= IFF_LIVE_ADDR_CHANGE;
+
+	if (options & DPNI_OPT_NO_MAC_FILTER)
+		not_supported |= IFF_UNICAST_FLT;
+	else
+		supported |= IFF_UNICAST_FLT;
+
+	net_dev->priv_flags |= supported;
+	net_dev->priv_flags &= ~not_supported;
+
+	/* Features */
+	net_dev->features = NETIF_F_RXCSUM |
+			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			    NETIF_F_SG | NETIF_F_HIGHDMA |
+			    NETIF_F_LLTX;
+	net_dev->hw_features = net_dev->features;
+
+	return 0;
+}
+
+static int poll_link_state(void *arg)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+	int err;
+
+	while (!kthread_should_stop()) {
+		err = link_state_update(priv);
+		if (unlikely(err))
+			return err;
+
+		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
+	}
+
+	return 0;
+}
+
+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
+{
+	u32 status = ~0;
+	struct device *dev = (struct device *)arg;
+	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
+	struct net_device *net_dev = dev_get_drvdata(dev);
+	int err;
+
+	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+				  DPNI_IRQ_INDEX, &status);
+	if (unlikely(err)) {
+		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
+		return IRQ_HANDLED;
+	}
+
+	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
+		link_state_update(netdev_priv(net_dev));
+
+	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+		set_mac_addr(netdev_priv(net_dev));
+
+	return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct fsl_mc_device *ls_dev)
+{
+	int err = 0;
+	struct fsl_mc_device_irq *irq;
+
+	err = fsl_mc_allocate_irqs(ls_dev);
+	if (err) {
+		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
+		return err;
+	}
+
+	irq = ls_dev->irqs[0];
+	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+					NULL, dpni_irq0_handler_thread,
+					IRQF_NO_SUSPEND | IRQF_ONESHOT,
+					dev_name(&ls_dev->dev), &ls_dev->dev);
+	if (err < 0) {
+		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
+		goto free_mc_irq;
+	}
+
+	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
+	if (err < 0) {
+		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
+		goto free_irq;
+	}
+
+	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+				  DPNI_IRQ_INDEX, 1);
+	if (err < 0) {
+		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
+		goto free_irq;
+	}
+
+	return 0;
+
+free_irq:
+	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+free_mc_irq:
+	fsl_mc_free_irqs(ls_dev);
+
+	return err;
+}
+
+static void add_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	int i;
+	struct dpaa2_eth_channel *ch;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
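+		/* (NAPI_POLL_WEIGHT is 64, i.e. 4 * DPAA2_ETH_STORE_SIZE,
+		 * so the requirement above holds here.)
+		 */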
+		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
+			       NAPI_POLL_WEIGHT);
+	}
+}
+
+static void del_ch_napi(struct dpaa2_eth_priv *priv)
+{
+	int i;
+	struct dpaa2_eth_channel *ch;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		netif_napi_del(&ch->napi);
+	}
+}
+
+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+{
+	struct device *dev;
+	struct net_device *net_dev = NULL;
+	struct dpaa2_eth_priv *priv = NULL;
+	int err = 0;
+
+	dev = &dpni_dev->dev;
+
+	/* Net device */
+	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
+	if (!net_dev) {
+		dev_err(dev, "alloc_etherdev_mq() failed\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(net_dev, dev);
+	dev_set_drvdata(dev, net_dev);
+
+	priv = netdev_priv(net_dev);
+	priv->net_dev = net_dev;
+
+	priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+	/* Obtain a MC portal */
+	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+				     &priv->mc_io);
+	if (err) {
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "MC portal allocation failed\n");
+		goto err_portal_alloc;
+	}
+
+	/* MC objects initialization and configuration */
+	err = setup_dpni(dpni_dev);
+	if (err)
+		goto err_dpni_setup;
+
+	err = setup_dpio(priv);
+	if (err)
+		goto err_dpio_setup;
+
+	setup_fqs(priv);
+
+	err = setup_dpbp(priv);
+	if (err)
+		goto err_dpbp_setup;
+
+	err = bind_dpni(priv);
+	if (err)
+		goto err_bind;
+
+	/* Add a NAPI context for each channel */
+	add_ch_napi(priv);
+
+	/* Percpu statistics */
+	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
+	if (!priv->percpu_stats) {
+		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
+		err = -ENOMEM;
+		goto err_alloc_percpu_stats;
+	}
+	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
+	if (!priv->percpu_extras) {
+		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
+		err = -ENOMEM;
+		goto err_alloc_percpu_extras;
+	}
+
+	err = netdev_init(net_dev);
+	if (err)
+		goto err_netdev_init;
+
+	/* Configure checksum offload based on current interface flags */
+	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+	if (err)
+		goto err_csum;
+
+	err = set_tx_csum(priv, !!(net_dev->features &
+				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+	if (err)
+		goto err_csum;
+
+	err = alloc_rings(priv);
+	if (err)
+		goto err_alloc_rings;
+
+	err = setup_irqs(dpni_dev);
+	if (err) {
+		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
+		priv->poll_thread = kthread_run(poll_link_state, priv,
+						"%s_poll_link", net_dev->name);
+		if (IS_ERR(priv->poll_thread)) {
+			dev_err(dev, "Error starting polling thread\n");
+			goto err_poll_thread;
+		}
+		priv->do_link_poll = true;
+	}
+
+	err = register_netdev(net_dev);
+	if (err < 0) {
+		dev_err(dev, "register_netdev() failed\n");
+		goto err_netdev_reg;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	dpaa2_dbg_add(priv);
+#endif
+
+	dev_info(dev, "Probed interface %s\n", net_dev->name);
+	return 0;
+
+err_netdev_reg:
+	if (priv->do_link_poll)
+		kthread_stop(priv->poll_thread);
+	else
+		fsl_mc_free_irqs(dpni_dev);
+err_poll_thread:
+	free_rings(priv);
+err_alloc_rings:
+err_csum:
+err_netdev_init:
+	free_percpu(priv->percpu_extras);
+err_alloc_percpu_extras:
+	free_percpu(priv->percpu_stats);
+err_alloc_percpu_stats:
+	del_ch_napi(priv);
+err_bind:
+	free_dpbp(priv);
+err_dpbp_setup:
+	free_dpio(priv);
+err_dpio_setup:
+	free_dpni(priv);
+err_dpni_setup:
+	fsl_mc_portal_free(priv->mc_io);
+err_portal_alloc:
+	dev_set_drvdata(dev, NULL);
+	free_netdev(net_dev);
+
+	return err;
+}
+
+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+{
+	struct device *dev;
+	struct net_device *net_dev;
+	struct dpaa2_eth_priv *priv;
+
+	dev = &ls_dev->dev;
+	net_dev = dev_get_drvdata(dev);
+	priv = netdev_priv(net_dev);
+
+#ifdef CONFIG_DEBUG_FS
+	dpaa2_dbg_remove(priv);
+#endif
+	unregister_netdev(net_dev);
+
+	if (priv->do_link_poll)
+		kthread_stop(priv->poll_thread);
+	else
+		fsl_mc_free_irqs(ls_dev);
+
+	free_rings(priv);
+	free_percpu(priv->percpu_stats);
+	free_percpu(priv->percpu_extras);
+
+	del_ch_napi(priv);
+	free_dpbp(priv);
+	free_dpio(priv);
+	free_dpni(priv);
+
+	fsl_mc_portal_free(priv->mc_io);
+
+	free_netdev(net_dev);
+
+	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+	return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dpni",
+	},
+	{ .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
+static struct fsl_mc_driver dpaa2_eth_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = dpaa2_eth_probe,
+	.remove = dpaa2_eth_remove,
+	.match_id_table = dpaa2_eth_match_id_table
+};
+
+static int __init dpaa2_eth_driver_init(void)
+{
+	int err;
+
+	dpaa2_eth_dbg_init();
+	err = fsl_mc_driver_register(&dpaa2_eth_driver);
+	if (err) {
+		dpaa2_eth_dbg_exit();
+		return err;
+	}
+
+	return 0;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+	dpaa2_eth_dbg_exit();
+	fsl_mc_driver_unregister(&dpaa2_eth_driver);
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
new file mode 100644
index 0000000..8a0e65b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ */
+
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/fsl/mc.h>
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+#include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
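+/* e.g. DPAA2_WRIOP_VERSION(1, 0, 0) == 1 << 10 == 0x400 (illustrative) */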
+
+#define DPAA2_ETH_STORE_SIZE		16
+
+/* Maximum number of scatter-gather entries in an ingress frame,
+ * considering the maximum receive frame size is 64K
+ */
+#define DPAA2_ETH_MAX_SG_ENTRIES	((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
+/* Maximum acceptable MTU value. It is in direct relation with the hardware
+ * enforced Max Frame Length (currently 10k).
+ */
+#define DPAA2_ETH_MFL			(10 * 1024)
+#define DPAA2_ETH_MAX_MTU		(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
+/* Convert L3 MTU to L2 MFL */
+#define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
+
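+/* For illustration: VLAN_ETH_HLEN is 18, so DPAA2_ETH_MAX_MTU evaluates to
+ * 10240 - 18 = 10222 bytes, and DPAA2_ETH_L2_MAX_FRM(1500) is 1518.
+ */
+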
+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
+ * frames in the Rx queues (length of the current frame is not
+ * taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
+
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI	256
+
+/* Buffer quota per queue. Must be large enough such that for minimum sized
+ * frames taildrop kicks in before the bpool gets depleted, so we compute
+ * how many 64B frames fit inside the taildrop threshold and add a margin
+ * to accommodate the buffer refill delay.
+ */
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_REFILL_THRESH \
+	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
+
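+/* Worked example (for illustration only): with the 64 KiB taildrop
+ * threshold above, DPAA2_ETH_MAX_FRAMES_PER_QUEUE = 65536 / 64 = 1024,
+ * so DPAA2_ETH_NUM_BUFS = 1280 and, with 7 buffers per QBMan command,
+ * DPAA2_ETH_REFILL_THRESH = 1280 - 7 = 1273.
+ */
+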
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD		7
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_ETH_TX_BUF_ALIGN		64
+
+#define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+	(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
+
+/* Hardware annotation area in RX/TX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE		64
+#define DPAA2_ETH_TX_HWA_SIZE		128
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_CLK_PERIOD_NS		1
+
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, the requirement is only for 64B alignment
+ */
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
+#define DPAA2_ETH_RX_BUF_ALIGN		64
+
+/* We are accommodating a skb backpointer and some S/G info
+ * in the frame's software annotation. The hardware
+ * options are either 0 or 64, so we choose the latter.
+ */
+#define DPAA2_ETH_SWA_SIZE		64
+
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+	DPAA2_ETH_SWA_SINGLE,
+	DPAA2_ETH_SWA_SG,
+	DPAA2_ETH_SWA_XDP,
+};
+
+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
+struct dpaa2_eth_swa {
+	enum dpaa2_eth_swa_type type;
+	union {
+		struct {
+			struct sk_buff *skb;
+		} single;
+		struct {
+			struct sk_buff *skb;
+			struct scatterlist *scl;
+			int num_sg;
+			int sgt_size;
+		} sg;
+		struct {
+			int dma_size;
+			struct xdp_frame *xdpf;
+		} xdp;
+	};
+};
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV		0x8000
+#define DPAA2_FD_FRC_FAEADV		0x4000
+#define DPAA2_FD_FRC_FAPRV		0x2000
+#define DPAA2_FD_FRC_FAIADV		0x1000
+#define DPAA2_FD_FRC_FASWOV		0x0800
+#define DPAA2_FD_FRC_FAICFDV		0x0400
+
+/* Error bits in FD CTRL */
+#define DPAA2_FD_RX_ERR_MASK		(FD_CTRL_SBE | FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK		(FD_CTRL_UFD	| \
+					 FD_CTRL_SBE	| \
+					 FD_CTRL_FSE	| \
+					 FD_CTRL_FAERR)
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128B */
+
+/* Frame annotation status */
+struct dpaa2_fas {
+	u8 reserved;
+	u8 ppid;
+	__le16 ifpid;
+	__le32 status;
+};
+
+/* Frame annotation status word is located in the first 8 bytes
+ * of the buffer's hardware annotation area
+ */
+#define DPAA2_FAS_OFFSET		0
+#define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))
+
+/* Timestamp is located in the next 8 bytes of the buffer's
+ * hardware annotation area
+ */
+#define DPAA2_TS_OFFSET			0x8
+
+/* Frame annotation egress action descriptor */
+#define DPAA2_FAEAD_OFFSET		0x58
+
+struct dpaa2_faead {
+	__le32 conf_fqid;
+	__le32 ctrl;
+};
+
+#define DPAA2_FAEAD_A2V			0x20000000
+#define DPAA2_FAEAD_A4V			0x08000000
+#define DPAA2_FAEAD_UPDV		0x00001000
+#define DPAA2_FAEAD_EBDDV		0x00002000
+#define DPAA2_FAEAD_UPD			0x00000010
+
+/* Accessors for the hardware annotation fields that we use */
+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
+{
+	return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
+	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
+
+static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
+	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
+
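+/* Illustrative layout implied by the offsets above, with hwa denoting the
+ * value returned by dpaa2_get_hwa() (buf_addr plus the 64 B software
+ * annotation when swa is set):
+ *
+ *   hwa + 0x00: frame annotation status (struct dpaa2_fas)
+ *   hwa + 0x08: hardware timestamp (__le64)
+ *   hwa + 0x58: frame annotation egress action descriptor (struct dpaa2_faead)
+ */
+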
+/* Error and status bits in the frame annotation status word */
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_FAS_DISC			0x80000000
+/* MACSEC frame */
+#define DPAA2_FAS_MS			0x40000000
+#define DPAA2_FAS_PTP			0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_FAS_MC			0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_FAS_BC			0x02000000
+#define DPAA2_FAS_KSE			0x00040000
+#define DPAA2_FAS_EOFHE			0x00020000
+#define DPAA2_FAS_MNLE			0x00010000
+#define DPAA2_FAS_TIDE			0x00008000
+#define DPAA2_FAS_PIEE			0x00004000
+/* Frame length error */
+#define DPAA2_FAS_FLE			0x00002000
+/* Frame physical error */
+#define DPAA2_FAS_FPE			0x00001000
+#define DPAA2_FAS_PTE			0x00000080
+#define DPAA2_FAS_ISP			0x00000040
+#define DPAA2_FAS_PHE			0x00000020
+#define DPAA2_FAS_BLE			0x00000010
+/* L3 csum validation performed */
+#define DPAA2_FAS_L3CV			0x00000008
+/* L3 csum error */
+#define DPAA2_FAS_L3CE			0x00000004
+/* L4 csum validation performed */
+#define DPAA2_FAS_L4CV			0x00000002
+/* L4 csum error */
+#define DPAA2_FAS_L4CE			0x00000001
+/* Possible errors on the ingress path */
+#define DPAA2_FAS_RX_ERR_MASK		(DPAA2_FAS_KSE		| \
+					 DPAA2_FAS_EOFHE	| \
+					 DPAA2_FAS_MNLE		| \
+					 DPAA2_FAS_TIDE		| \
+					 DPAA2_FAS_PIEE		| \
+					 DPAA2_FAS_FLE		| \
+					 DPAA2_FAS_FPE		| \
+					 DPAA2_FAS_PTE		| \
+					 DPAA2_FAS_ISP		| \
+					 DPAA2_FAS_PHE		| \
+					 DPAA2_FAS_BLE		| \
+					 DPAA2_FAS_L3CE		| \
+					 DPAA2_FAS_L4CE)
+
+/* Time in milliseconds between link state updates */
+#define DPAA2_ETH_LINK_STATE_REFRESH	1000
+
+/* Number of times to retry a frame enqueue before giving up.
+ * Value determined empirically, in order to minimize the number
+ * of frames dropped on Tx
+ */
+#define DPAA2_ETH_ENQUEUE_RETRIES	10
+
+/* Driver statistics, other than those in struct rtnl_link_stats64.
+ * These are usually collected per-CPU and aggregated by ethtool.
+ */
+struct dpaa2_eth_drv_stats {
+	__u64	tx_conf_frames;
+	__u64	tx_conf_bytes;
+	__u64	tx_sg_frames;
+	__u64	tx_sg_bytes;
+	__u64	tx_reallocs;
+	__u64	rx_sg_frames;
+	__u64	rx_sg_bytes;
+	/* Enqueues retried due to portal busy */
+	__u64	tx_portal_busy;
+};
+
+/* Per-FQ statistics */
+struct dpaa2_eth_fq_stats {
+	/* Number of frames received on this queue */
+	__u64 frames;
+};
+
+/* Per-channel statistics */
+struct dpaa2_eth_ch_stats {
+	/* Volatile dequeues retried due to portal busy */
+	__u64 dequeue_portal_busy;
+	/* Pull errors */
+	__u64 pull_err;
+	/* Number of CDANs; useful to estimate avg NAPI len */
+	__u64 cdan;
+	/* XDP counters */
+	__u64 xdp_drop;
+	__u64 xdp_tx;
+	__u64 xdp_tx_err;
+	__u64 xdp_redirect;
+};
+
+/* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_TCS		8
+#define DPAA2_ETH_MAX_RX_QUEUES		16
+#define DPAA2_ETH_MAX_TX_QUEUES		16
+#define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
+					DPAA2_ETH_MAX_TX_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES	\
+	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
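+/* i.e. at most 16 * 8 = 128 netdev Tx queues */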
+
+#define DPAA2_ETH_MAX_DPCONS		16
+
+enum dpaa2_eth_fq_type {
+	DPAA2_RX_FQ = 0,
+	DPAA2_TX_CONF_FQ,
+};
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_eth_fq {
+	u32 fqid;
+	u32 tx_qdbin;
+	u32 tx_fqid[DPAA2_ETH_MAX_TCS];
+	u16 flowid;
+	u8 tc;
+	int target_cpu;
+	u32 dq_frames;
+	u32 dq_bytes;
+	struct dpaa2_eth_channel *channel;
+	enum dpaa2_eth_fq_type type;
+
+	void (*consume)(struct dpaa2_eth_priv *priv,
+			struct dpaa2_eth_channel *ch,
+			const struct dpaa2_fd *fd,
+			struct dpaa2_eth_fq *fq);
+	struct dpaa2_eth_fq_stats stats;
+};
+
+struct dpaa2_eth_ch_xdp {
+	struct bpf_prog *prog;
+	u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+	int drop_cnt;
+	unsigned int res;
+};
+
+struct dpaa2_eth_channel {
+	struct dpaa2_io_notification_ctx nctx;
+	struct fsl_mc_device *dpcon;
+	int dpcon_id;
+	int ch_id;
+	struct napi_struct napi;
+	struct dpaa2_io *dpio;
+	struct dpaa2_io_store *store;
+	struct dpaa2_eth_priv *priv;
+	int buf_count;
+	struct dpaa2_eth_ch_stats stats;
+	struct dpaa2_eth_ch_xdp xdp;
+	struct xdp_rxq_info xdp_rxq;
+	struct list_head *rx_list;
+};
+
+struct dpaa2_eth_dist_fields {
+	u64 rxnfc_field;
+	enum net_prot cls_prot;
+	int cls_field;
+	int size;
+	u64 id;
+};
+
+struct dpaa2_eth_cls_rule {
+	struct ethtool_rx_flow_spec fs;
+	u8 in_use;
+};
+
+/* Driver private data */
+struct dpaa2_eth_priv {
+	struct net_device *net_dev;
+
+	u8 num_fqs;
+	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+	int (*enqueue)(struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
+		       struct dpaa2_fd *fd, u8 prio);
+
+	u8 num_channels;
+	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+
+	struct dpni_attr dpni_attrs;
+	u16 dpni_ver_major;
+	u16 dpni_ver_minor;
+	u16 tx_data_offset;
+
+	struct fsl_mc_device *dpbp_dev;
+	u16 bpid;
+	struct iommu_domain *iommu_domain;
+
+	bool tx_tstamp; /* Tx timestamping enabled */
+	bool rx_tstamp; /* Rx timestamping enabled */
+
+	u16 tx_qdid;
+	struct fsl_mc_io *mc_io;
+	/* Cores which have an affine DPIO/DPCON.
+	 * This is the cpu set on which Rx and Tx conf frames are processed
+	 */
+	struct cpumask dpio_cpumask;
+
+	/* Standard statistics */
+	struct rtnl_link_stats64 __percpu *percpu_stats;
+	/* Extra stats, in addition to the ones known by the kernel */
+	struct dpaa2_eth_drv_stats __percpu *percpu_extras;
+
+	u16 mc_token;
+	u8 rx_td_enabled;
+
+	struct dpni_link_state link_state;
+	bool do_link_poll;
+	struct task_struct *poll_thread;
+
+	/* enabled ethtool hashing bits */
+	u64 rx_hash_fields;
+	u64 rx_cls_fields;
+	struct dpaa2_eth_cls_rule *cls_rules;
+	u8 rx_cls_enabled;
+	struct bpf_prog *xdp_prog;
+#ifdef CONFIG_DEBUG_FS
+	struct dpaa2_debugfs dbg;
+#endif
+};
+
+#define DPAA2_RXH_SUPPORTED	(RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
+				| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
+				| RXH_L4_B_2_3)
+
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_DEFAULT	(RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
+				 RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+#define dpaa2_eth_hash_enabled(priv)	\
+	((priv)->dpni_attrs.num_queues > 1)
+
+/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+extern const struct ethtool_ops dpaa2_ethtool_ops;
+extern int dpaa2_phc_index;
+
+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
+					 u16 ver_major, u16 ver_minor)
+{
+	if (priv->dpni_ver_major == ver_major)
+		return priv->dpni_ver_minor - ver_minor;
+	return priv->dpni_ver_major - ver_major;
+}
+
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR	7
+#define DPNI_RX_DIST_KEY_VER_MINOR	5
+
+#define dpaa2_eth_has_legacy_dist(priv)					\
+	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,	\
+				DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
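+/* For example (illustrative): a DPNI reporting version 7.8 gives
+ * dpaa2_eth_cmp_dpni_ver(priv, 7, 5) == 8 - 5 == 3, so
+ * dpaa2_eth_has_legacy_dist() is false and the more flexible Rx hash key
+ * API can be used.
+ */
+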
+#define dpaa2_eth_fs_enabled(priv)	\
+	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv)	\
+	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
+#define dpaa2_eth_fs_count(priv)        \
+	((priv)->dpni_attrs.fs_entries)
+
+#define dpaa2_eth_tc_count(priv)	\
+	((priv)->dpni_attrs.num_tcs)
+
+/* We have exactly one {Rx, Tx conf} queue per channel */
+#define dpaa2_eth_queue_count(priv)     \
+	((priv)->num_channels)
+
+enum dpaa2_eth_rx_dist {
+	DPAA2_ETH_RX_DIST_HASH,
+	DPAA2_ETH_RX_DIST_CLS
+};
+
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST		BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC		BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE		BIT(2)
+#define DPAA2_ETH_DIST_VLAN		BIT(3)
+#define DPAA2_ETH_DIST_IPSRC		BIT(4)
+#define DPAA2_ETH_DIST_IPDST		BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO		BIT(6)
+#define DPAA2_ETH_DIST_L4SRC		BIT(7)
+#define DPAA2_ETH_DIST_L4DST		BIT(8)
+#define DPAA2_ETH_DIST_ALL		(~0ULL)
+
+#define DPNI_PAUSE_VER_MAJOR		7
+#define DPNI_PAUSE_VER_MINOR		13
+#define dpaa2_eth_has_pause_support(priv)			\
+	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR,	\
+				DPNI_PAUSE_VER_MINOR) >= 0)
+
+static inline
+unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
+				       struct sk_buff *skb)
+{
+	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
+	/* If we don't have an skb (e.g. XDP buffer), we only need space for
+	 * the software annotation area
+	 */
+	if (!skb)
+		return headroom;
+
+	/* For non-linear skbs we have no headroom requirement, as we build a
+	 * SG frame with a newly allocated SGT buffer
+	 */
+	if (skb_is_nonlinear(skb))
+		return 0;
+
+	/* If we have Tx timestamping, need 128B hardware annotation */
+	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+		headroom += DPAA2_ETH_TX_HWA_SIZE;
+
+	return headroom;
+}
+
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+{
+	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
+}
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
+int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+
+#endif	/* __DPAA2_ETH_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
new file mode 100644
index 0000000..0aa1c34
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ */
+
+#include <linux/net_tstamp.h>
+#include <linux/nospec.h>
+
+#include "dpni.h"	/* DPNI_LINK_OPT_* */
+#include "dpaa2-eth.h"
+
+/* To be kept in sync with DPNI statistics */
+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+	"[hw] rx frames",
+	"[hw] rx bytes",
+	"[hw] rx mcast frames",
+	"[hw] rx mcast bytes",
+	"[hw] rx bcast frames",
+	"[hw] rx bcast bytes",
+	"[hw] tx frames",
+	"[hw] tx bytes",
+	"[hw] tx mcast frames",
+	"[hw] tx mcast bytes",
+	"[hw] tx bcast frames",
+	"[hw] tx bcast bytes",
+	"[hw] rx filtered frames",
+	"[hw] rx discarded frames",
+	"[hw] rx nobuffer discards",
+	"[hw] tx discarded frames",
+	"[hw] tx confirmed frames",
+	"[hw] tx dequeued bytes",
+	"[hw] tx dequeued frames",
+	"[hw] tx rejected bytes",
+	"[hw] tx rejected frames",
+	"[hw] tx pending frames",
+};
+
+#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)
+
+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+	/* per-cpu stats */
+	"[drv] tx conf frames",
+	"[drv] tx conf bytes",
+	"[drv] tx sg frames",
+	"[drv] tx sg bytes",
+	"[drv] tx realloc frames",
+	"[drv] rx sg frames",
+	"[drv] rx sg bytes",
+	"[drv] enqueue portal busy",
+	/* Channel stats */
+	"[drv] dequeue portal busy",
+	"[drv] channel pull errors",
+	"[drv] cdan",
+	"[drv] xdp drop",
+	"[drv] xdp tx",
+	"[drv] xdp tx errors",
+	"[drv] xdp redirect",
+	/* FQ stats */
+	"[qbman] rx pending frames",
+	"[qbman] rx pending bytes",
+	"[qbman] tx conf pending frames",
+	"[qbman] tx conf pending bytes",
+	"[qbman] buffer count",
+};
+
+#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
+
+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
+				  struct ethtool_drvinfo *drvinfo)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
+
+	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+		sizeof(drvinfo->bus_info));
+}
+
+static int
+dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
+			     struct ethtool_link_ksettings *link_settings)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+	link_settings->base.autoneg = AUTONEG_DISABLE;
+	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+		link_settings->base.duplex = DUPLEX_FULL;
+	link_settings->base.speed = priv->link_state.rate;
+
+	return 0;
+}
+
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+				     struct ethtool_pauseparam *pause)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u64 link_options = priv->link_state.options;
+
+	pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
+	pause->tx_pause = pause->rx_pause ^
+			  !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+	pause->autoneg = AUTONEG_DISABLE;
+}
+
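+/* Summary of the pause frame encoding used by the get/set callbacks
+ * (illustrative):
+ *
+ *   PAUSE  ASYM_PAUSE  ->  rx_pause  tx_pause
+ *     0        0             0         0
+ *     0        1             0         1
+ *     1        0             1         1
+ *     1        1             1         0
+ *
+ * i.e. tx_pause == rx_pause ^ asym_pause.
+ */
+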
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+				    struct ethtool_pauseparam *pause)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpni_link_cfg cfg = {0};
+	int err;
+
+	if (!dpaa2_eth_has_pause_support(priv)) {
+		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
+			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
+		return -EOPNOTSUPP;
+	}
+
+	if (pause->autoneg)
+		return -EOPNOTSUPP;
+
+	cfg.rate = priv->link_state.rate;
+	cfg.options = priv->link_state.options;
+	if (pause->rx_pause)
+		cfg.options |= DPNI_LINK_OPT_PAUSE;
+	else
+		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+	if (!!pause->rx_pause ^ !!pause->tx_pause)
+		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+	else
+		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+	if (cfg.options == priv->link_state.options)
+		return 0;
+
+	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+	if (err) {
+		netdev_err(net_dev, "dpni_set_link_state failed\n");
+		return err;
+	}
+
+	priv->link_state.options = cfg.options;
+
+	return 0;
+}
+
+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
+				  u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
+			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
+			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+		return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Fill in hardware counters, as returned by MC */
+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+					struct ethtool_stats *stats,
+					u64 *data)
+{
+	int i = 0;
+	int j, k, err;
+	int num_cnt;
+	union dpni_statistics dpni_stats;
+	u32 fcnt, bcnt;
+	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+	u32 buf_cnt;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpaa2_eth_drv_stats *extras;
+	struct dpaa2_eth_ch_stats *ch_stats;
+	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
+		sizeof(dpni_stats.page_0),
+		sizeof(dpni_stats.page_1),
+		sizeof(dpni_stats.page_2),
+		sizeof(dpni_stats.page_3),
+		sizeof(dpni_stats.page_4),
+		sizeof(dpni_stats.page_5),
+		sizeof(dpni_stats.page_6),
+	};
+
+	memset(data, 0,
+	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
+	/* Print standard counters, from DPNI statistics */
+	for (j = 0; j <= 6; j++) {
+		/* We're not interested in pages 4 & 5 for now */
+		if (j == 4 || j == 5)
+			continue;
+		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+					  j, &dpni_stats);
+		if (err == -EINVAL)
+			/* Older firmware versions don't support all pages */
+			memset(&dpni_stats, 0, sizeof(dpni_stats));
+		else if (err)
+			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
+
+		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
+		for (k = 0; k < num_cnt; k++)
+			*(data + i++) = dpni_stats.raw.counter[k];
+	}
+
+	/* Print per-cpu extra stats */
+	for_each_online_cpu(k) {
+		extras = per_cpu_ptr(priv->percpu_extras, k);
+		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
+	}
+	i += j;
+
+	/* Per-channel stats */
+	for (k = 0; k < priv->num_channels; k++) {
+		ch_stats = &priv->channel[k]->stats;
+		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
+	}
+	i += j;
+
+	for (j = 0; j < priv->num_fqs; j++) {
+		/* Print FQ instantaneous counts */
+		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+					      &fcnt, &bcnt);
+		if (err) {
+			netdev_warn(net_dev, "FQ query error %d", err);
+			return;
+		}
+
+		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+			fcnt_tx_total += fcnt;
+			bcnt_tx_total += bcnt;
+		} else {
+			fcnt_rx_total += fcnt;
+			bcnt_rx_total += bcnt;
+		}
+	}
+
+	*(data + i++) = fcnt_rx_total;
+	*(data + i++) = bcnt_rx_total;
+	*(data + i++) = fcnt_tx_total;
+	*(data + i++) = bcnt_tx_total;
+
+	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+	if (err) {
+		netdev_warn(net_dev, "Buffer count query error %d\n", err);
+		return;
+	}
+	*(data + i++) = buf_cnt;
+}
+
+static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+			 void *key, void *mask, u64 *fields)
+{
+	int off;
+
+	if (eth_mask->h_proto) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		*(__be16 *)(key + off) = eth_value->h_proto;
+		*(__be16 *)(mask + off) = eth_mask->h_proto;
+		*fields |= DPAA2_ETH_DIST_ETHTYPE;
+	}
+
+	if (!is_zero_ether_addr(eth_mask->h_source)) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+		ether_addr_copy(key + off, eth_value->h_source);
+		ether_addr_copy(mask + off, eth_mask->h_source);
+		*fields |= DPAA2_ETH_DIST_ETHSRC;
+	}
+
+	if (!is_zero_ether_addr(eth_mask->h_dest)) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+		ether_addr_copy(key + off, eth_value->h_dest);
+		ether_addr_copy(mask + off, eth_mask->h_dest);
+		*fields |= DPAA2_ETH_DIST_ETHDST;
+	}
+
+	return 0;
+}
+
+static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+			 struct ethtool_usrip4_spec *uip_mask,
+			 void *key, void *mask, u64 *fields)
+{
+	int off;
+	u32 tmp_value, tmp_mask;
+
+	if (uip_mask->tos || uip_mask->ip_ver)
+		return -EOPNOTSUPP;
+
+	if (uip_mask->ip4src) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+		*(__be32 *)(key + off) = uip_value->ip4src;
+		*(__be32 *)(mask + off) = uip_mask->ip4src;
+		*fields |= DPAA2_ETH_DIST_IPSRC;
+	}
+
+	if (uip_mask->ip4dst) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+		*(__be32 *)(key + off) = uip_value->ip4dst;
+		*(__be32 *)(mask + off) = uip_mask->ip4dst;
+		*fields |= DPAA2_ETH_DIST_IPDST;
+	}
+
+	if (uip_mask->proto) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+		*(u8 *)(key + off) = uip_value->proto;
+		*(u8 *)(mask + off) = uip_mask->proto;
+		*fields |= DPAA2_ETH_DIST_IPPROTO;
+	}
+
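+	/* l4_4_bytes holds the first four bytes of the L4 header, i.e. for
+	 * TCP/UDP the source port in the upper 16 bits and the destination
+	 * port in the lower 16 bits.
+	 */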
+	if (uip_mask->l4_4_bytes) {
+		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
+		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
+
+		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+		*(__be16 *)(key + off) = htons(tmp_value >> 16);
+		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+		*fields |= DPAA2_ETH_DIST_L4SRC;
+
+		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
+		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+		*fields |= DPAA2_ETH_DIST_L4DST;
+	}
+
+	/* Only apply the rule for IPv4 frames */
+	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+	*(__be16 *)(key + off) = htons(ETH_P_IP);
+	*(__be16 *)(mask + off) = htons(0xFFFF);
+	*fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+	return 0;
+}
+
+static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+			struct ethtool_tcpip4_spec *l4_mask,
+			void *key, void *mask, u8 l4_proto, u64 *fields)
+{
+	int off;
+
+	if (l4_mask->tos)
+		return -EOPNOTSUPP;
+
+	if (l4_mask->ip4src) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+		*(__be32 *)(key + off) = l4_value->ip4src;
+		*(__be32 *)(mask + off) = l4_mask->ip4src;
+		*fields |= DPAA2_ETH_DIST_IPSRC;
+	}
+
+	if (l4_mask->ip4dst) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+		*(__be32 *)(key + off) = l4_value->ip4dst;
+		*(__be32 *)(mask + off) = l4_mask->ip4dst;
+		*fields |= DPAA2_ETH_DIST_IPDST;
+	}
+
+	if (l4_mask->psrc) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+		*(__be16 *)(key + off) = l4_value->psrc;
+		*(__be16 *)(mask + off) = l4_mask->psrc;
+		*fields |= DPAA2_ETH_DIST_L4SRC;
+	}
+
+	if (l4_mask->pdst) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+		*(__be16 *)(key + off) = l4_value->pdst;
+		*(__be16 *)(mask + off) = l4_mask->pdst;
+		*fields |= DPAA2_ETH_DIST_L4DST;
+	}
+
+	/* Only apply the rule for IPv4 frames with the specified L4 proto */
+	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+	*(__be16 *)(key + off) = htons(ETH_P_IP);
+	*(__be16 *)(mask + off) = htons(0xFFFF);
+	*fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+	*(u8 *)(key + off) = l4_proto;
+	*(u8 *)(mask + off) = 0xFF;
+	*fields |= DPAA2_ETH_DIST_IPPROTO;
+
+	return 0;
+}
+
+static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
+			 struct ethtool_flow_ext *ext_mask,
+			 void *key, void *mask, u64 *fields)
+{
+	int off;
+
+	if (ext_mask->vlan_etype)
+		return -EOPNOTSUPP;
+
+	if (ext_mask->vlan_tci) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+		*(__be16 *)(key + off) = ext_value->vlan_tci;
+		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
+		*fields |= DPAA2_ETH_DIST_VLAN;
+	}
+
+	return 0;
+}
+
+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+			     struct ethtool_flow_ext *ext_mask,
+			     void *key, void *mask, u64 *fields)
+{
+	int off;
+
+	if (!is_zero_ether_addr(ext_mask->h_dest)) {
+		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+		ether_addr_copy(key + off, ext_value->h_dest);
+		ether_addr_copy(mask + off, ext_mask->h_dest);
+		*fields |= DPAA2_ETH_DIST_ETHDST;
+	}
+
+	return 0;
+}
+
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+			 u64 *fields)
+{
+	int err;
+
+	switch (fs->flow_type & 0xFF) {
+	case ETHER_FLOW:
+		err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+				    key, mask, fields);
+		break;
+	case IP_USER_FLOW:
+		err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
+				    &fs->m_u.usr_ip4_spec, key, mask, fields);
+		break;
+	case TCP_V4_FLOW:
+		err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+				   key, mask, IPPROTO_TCP, fields);
+		break;
+	case UDP_V4_FLOW:
+		err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+				   key, mask, IPPROTO_UDP, fields);
+		break;
+	case SCTP_V4_FLOW:
+		err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+				   &fs->m_u.sctp_ip4_spec, key, mask,
+				   IPPROTO_SCTP, fields);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (err)
+		return err;
+
+	if (fs->flow_type & FLOW_EXT) {
+		err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+		if (err)
+			return err;
+	}
+
+	if (fs->flow_type & FLOW_MAC_EXT) {
+		err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+					fields);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int do_cls_rule(struct net_device *net_dev,
+		       struct ethtool_rx_flow_spec *fs,
+		       bool add)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+	struct dpni_rule_cfg rule_cfg = { 0 };
+	struct dpni_fs_action_cfg fs_act = { 0 };
+	dma_addr_t key_iova;
+	u64 fields = 0;
+	void *key_buf;
+	int err;
+
+	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+		return -EINVAL;
+
+	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
+
+	/* allocate twice the key size, for the actual key and for mask */
+	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
+	if (!key_buf)
+		return -ENOMEM;
+
+	/* Fill the key and mask memory areas */
+	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+	if (err)
+		goto free_mem;
+
+	if (!dpaa2_eth_fs_mask_enabled(priv)) {
+		/* Masking allows us to configure a maximal key during init and
+		 * use it for all flow steering rules. Without it, we include
+		 * in the key only the fields actually used, so we need to
+		 * extract the others from the final key buffer.
+		 *
+		 * Program the FS key if needed, or return error if previously
+		 * set key can't be used for the current rule. User needs to
+		 * delete existing rules in this case to allow for the new one.
+		 */
+		if (!priv->rx_cls_fields) {
+			err = dpaa2_eth_set_cls(net_dev, fields);
+			if (err)
+				goto free_mem;
+
+			priv->rx_cls_fields = fields;
+		} else if (priv->rx_cls_fields != fields) {
+			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+			err = -EOPNOTSUPP;
+			goto free_mem;
+		}
+
+		dpaa2_eth_cls_trim_rule(key_buf, fields);
+		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+	}
+
+	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, key_iova)) {
+		err = -ENOMEM;
+		goto free_mem;
+	}
+
+	rule_cfg.key_iova = key_iova;
+	if (dpaa2_eth_fs_mask_enabled(priv))
+		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
+	if (add) {
+		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+			fs_act.options |= DPNI_FS_OPT_DISCARD;
+		else
+			fs_act.flow_id = fs->ring_cookie;
+		err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+					fs->location, &rule_cfg, &fs_act);
+	} else {
+		err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+					   &rule_cfg);
+	}
+
+	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+free_mem:
+	kfree(key_buf);
+
+	return err;
+}
+
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+	int i, rules = 0;
+
+	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+		if (priv->cls_rules[i].in_use)
+			rules++;
+
+	return rules;
+}
+
+static int update_cls_rule(struct net_device *net_dev,
+			   struct ethtool_rx_flow_spec *new_fs,
+			   int location)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpaa2_eth_cls_rule *rule;
+	int err = -EINVAL;
+
+	if (!priv->rx_cls_enabled)
+		return -EOPNOTSUPP;
+
+	if (location >= dpaa2_eth_fs_count(priv))
+		return -EINVAL;
+
+	rule = &priv->cls_rules[location];
+
+	/* If a rule is present at the specified location, delete it. */
+	if (rule->in_use) {
+		err = do_cls_rule(net_dev, &rule->fs, false);
+		if (err)
+			return err;
+
+		rule->in_use = 0;
+
+		if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+			priv->rx_cls_fields = 0;
+	}
+
+	/* If no new entry to add, return here */
+	if (!new_fs)
+		return err;
+
+	err = do_cls_rule(net_dev, new_fs, true);
+	if (err)
+		return err;
+
+	rule->in_use = 1;
+	rule->fs = *new_fs;
+
+	return 0;
+}
+
+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
+			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int max_rules = dpaa2_eth_fs_count(priv);
+	int i, j = 0;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_GRXFH:
+		/* we purposely ignore cmd->flow_type for now, because the
+		 * classifier only supports a single set of fields for all
+		 * protocols
+		 */
+		rxnfc->data = priv->rx_hash_fields;
+		break;
+	case ETHTOOL_GRXRINGS:
+		rxnfc->data = dpaa2_eth_queue_count(priv);
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		rxnfc->rule_cnt = num_rules(priv);
+		rxnfc->data = max_rules;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		if (rxnfc->fs.location >= max_rules)
+			return -EINVAL;
+		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
+							max_rules);
+		if (!priv->cls_rules[rxnfc->fs.location].in_use)
+			return -EINVAL;
+		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		for (i = 0; i < max_rules; i++) {
+			if (!priv->cls_rules[i].in_use)
+				continue;
+			if (j == rxnfc->rule_cnt)
+				return -EMSGSIZE;
+			rule_locs[j++] = i;
+		}
+		rxnfc->rule_cnt = j;
+		rxnfc->data = max_rules;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
+			       struct ethtool_rxnfc *rxnfc)
+{
+	int err = 0;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_SRXFH:
+		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+			return -EOPNOTSUPP;
+		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
+		break;
+	case ETHTOOL_SRXCLSRLINS:
+		err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+int dpaa2_phc_index = -1;
+EXPORT_SYMBOL(dpaa2_phc_index);
+
+static int dpaa2_eth_get_ts_info(struct net_device *dev,
+				 struct ethtool_ts_info *info)
+{
+	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->phc_index = dpaa2_phc_index;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+			 (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_ALL);
+	return 0;
+}
+
+const struct ethtool_ops dpaa2_ethtool_ops = {
+	.get_drvinfo = dpaa2_eth_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
+	.get_pauseparam = dpaa2_eth_get_pauseparam,
+	.set_pauseparam = dpaa2_eth_set_pauseparam,
+	.get_sset_count = dpaa2_eth_get_sset_count,
+	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
+	.get_strings = dpaa2_eth_get_strings,
+	.get_rxnfc = dpaa2_eth_get_rxnfc,
+	.set_rxnfc = dpaa2_eth_set_rxnfc,
+	.get_ts_info = dpaa2_eth_get_ts_info,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
new file mode 100644
index 0000000..a9503ae
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "dpaa2-ptp.h"
+
+static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+	struct fsl_mc_device *mc_dev;
+	struct device *dev;
+	u32 mask = 0;
+	u32 bit;
+	int err;
+
+	dev = ptp_qoriq->dev;
+	mc_dev = to_fsl_mc_device(dev);
+
+	switch (rq->type) {
+	case PTP_CLK_REQ_PPS:
+		bit = DPRTC_EVENT_PPS;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	err = dprtc_get_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				 DPRTC_IRQ_INDEX, &mask);
+	if (err < 0) {
+		dev_err(dev, "dprtc_get_irq_mask(): %d\n", err);
+		return err;
+	}
+
+	if (on)
+		mask |= bit;
+	else
+		mask &= ~bit;
+
+	err = dprtc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				 DPRTC_IRQ_INDEX, mask);
+	if (err < 0) {
+		dev_err(dev, "dprtc_set_irq_mask(): %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ptp_clock_info dpaa2_ptp_caps = {
+	.owner		= THIS_MODULE,
+	.name		= "DPAA2 PTP Clock",
+	.max_adj	= 512000,
+	.n_alarm	= 2,
+	.n_ext_ts	= 2,
+	.n_per_out	= 3,
+	.n_pins		= 0,
+	.pps		= 1,
+	.adjfine	= ptp_qoriq_adjfine,
+	.adjtime	= ptp_qoriq_adjtime,
+	.gettime64	= ptp_qoriq_gettime,
+	.settime64	= ptp_qoriq_settime,
+	.enable		= dpaa2_ptp_enable,
+};
+
+static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
+{
+	struct ptp_qoriq *ptp_qoriq = priv;
+	struct ptp_clock_event event;
+	struct fsl_mc_device *mc_dev;
+	struct device *dev;
+	u32 status = 0;
+	int err;
+
+	dev = ptp_qoriq->dev;
+	mc_dev = to_fsl_mc_device(dev);
+
+	err = dprtc_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				   DPRTC_IRQ_INDEX, &status);
+	if (unlikely(err)) {
+		dev_err(dev, "dprtc_get_irq_status err %d\n", err);
+		return IRQ_NONE;
+	}
+
+	if (status & DPRTC_EVENT_PPS) {
+		event.type = PTP_CLOCK_PPS;
+		ptp_clock_event(ptp_qoriq->clock, &event);
+	}
+
+	err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				     DPRTC_IRQ_INDEX, status);
+	if (unlikely(err)) {
+		dev_err(dev, "dprtc_clear_irq_status err %d\n", err);
+		return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+{
+	struct device *dev = &mc_dev->dev;
+	struct fsl_mc_device_irq *irq;
+	struct ptp_qoriq *ptp_qoriq;
+	struct device_node *node;
+	void __iomem *base;
+	int err;
+
+	ptp_qoriq = devm_kzalloc(dev, sizeof(*ptp_qoriq), GFP_KERNEL);
+	if (!ptp_qoriq)
+		return -ENOMEM;
+
+	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+	if (err) {
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+		goto err_exit;
+	}
+
+	err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+			 &mc_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dprtc_open err %d\n", err);
+		goto err_free_mcp;
+	}
+
+	ptp_qoriq->dev = dev;
+
+	node = of_find_compatible_node(NULL, NULL, "fsl,dpaa2-ptp");
+	if (!node) {
+		err = -ENODEV;
+		goto err_close;
+	}
+
+	dev->of_node = node;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		err = -ENOMEM;
+		goto err_close;
+	}
+
+	err = fsl_mc_allocate_irqs(mc_dev);
+	if (err) {
+		dev_err(dev, "MC irqs allocation failed\n");
+		goto err_unmap;
+	}
+
+	irq = mc_dev->irqs[0];
+	ptp_qoriq->irq = irq->msi_desc->irq;
+
+	err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL,
+					dpaa2_ptp_irq_handler_thread,
+					IRQF_NO_SUSPEND | IRQF_ONESHOT,
+					dev_name(dev), ptp_qoriq);
+	if (err < 0) {
+		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+		goto err_free_mc_irq;
+	}
+
+	err = dprtc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				   DPRTC_IRQ_INDEX, 1);
+	if (err < 0) {
+		dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
+		goto err_free_mc_irq;
+	}
+
+	err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
+	if (err)
+		goto err_free_mc_irq;
+
+	dpaa2_phc_index = ptp_qoriq->phc_index;
+	dev_set_drvdata(dev, ptp_qoriq);
+
+	return 0;
+
+err_free_mc_irq:
+	fsl_mc_free_irqs(mc_dev);
+err_unmap:
+	iounmap(base);
+err_close:
+	dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+	fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+	return err;
+}
+
+static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
+{
+	struct device *dev = &mc_dev->dev;
+	struct ptp_qoriq *ptp_qoriq;
+
+	ptp_qoriq = dev_get_drvdata(dev);
+
+	dpaa2_phc_index = -1;
+	ptp_qoriq_free(ptp_qoriq);
+
+	fsl_mc_free_irqs(mc_dev);
+	dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+	fsl_mc_portal_free(mc_dev->mc_io);
+
+	return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dprtc",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table);
+
+static struct fsl_mc_driver dpaa2_ptp_drv = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = dpaa2_ptp_probe,
+	.remove = dpaa2_ptp_remove,
+	.match_id_table = dpaa2_ptp_match_id_table,
+};
+
+module_fsl_mc_driver(dpaa2_ptp_drv);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
new file mode 100644
index 0000000..df2458a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTC_H
+#define __RTC_H
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+extern int dpaa2_phc_index;
+
+#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
new file mode 100644
index 0000000..6de613b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
@@ -0,0 +1,480 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <linux/types.h>
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS		4
+/**
+ * Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS	10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+	DPKG_FROM_HDR = 0,
+	DPKG_FROM_FIELD = 1,
+	DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ *	e.g. can be used to extract header existence;
+ *	please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+	DPKG_EXTRACT_FROM_HDR = 0,
+	DPKG_EXTRACT_FROM_DATA = 1,
+	DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+	u8 mask;
+	u8 offset;
+};
+
+/* Protocol fields */
+
+/* Ethernet fields */
+#define NH_FLD_ETH_DA				BIT(0)
+#define NH_FLD_ETH_SA				BIT(1)
+#define NH_FLD_ETH_LENGTH			BIT(2)
+#define NH_FLD_ETH_TYPE				BIT(3)
+#define NH_FLD_ETH_FINAL_CKSUM			BIT(4)
+#define NH_FLD_ETH_PADDING			BIT(5)
+#define NH_FLD_ETH_ALL_FIELDS			(BIT(6) - 1)
+
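+/* The BIT(n) - 1 pattern used for the *_ALL_FIELDS masks ORs together all
+ * n field bits defined above it; e.g. NH_FLD_ETH_ALL_FIELDS == BIT(6) - 1
+ * == 0x3f covers the six Ethernet fields.
+ */
+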
+/* VLAN fields */
+#define NH_FLD_VLAN_VPRI			BIT(0)
+#define NH_FLD_VLAN_CFI				BIT(1)
+#define NH_FLD_VLAN_VID				BIT(2)
+#define NH_FLD_VLAN_LENGTH			BIT(3)
+#define NH_FLD_VLAN_TYPE			BIT(4)
+#define NH_FLD_VLAN_ALL_FIELDS			(BIT(5) - 1)
+
+#define NH_FLD_VLAN_TCI				(NH_FLD_VLAN_VPRI | \
+						 NH_FLD_VLAN_CFI | \
+						 NH_FLD_VLAN_VID)
+
+/* IP (generic) fields */
+#define NH_FLD_IP_VER				BIT(0)
+#define NH_FLD_IP_DSCP				BIT(2)
+#define NH_FLD_IP_ECN				BIT(3)
+#define NH_FLD_IP_PROTO				BIT(4)
+#define NH_FLD_IP_SRC				BIT(5)
+#define NH_FLD_IP_DST				BIT(6)
+#define NH_FLD_IP_TOS_TC			BIT(7)
+#define NH_FLD_IP_ID				BIT(8)
+#define NH_FLD_IP_ALL_FIELDS			(BIT(9) - 1)
+
+/* IPV4 fields */
+#define NH_FLD_IPV4_VER				BIT(0)
+#define NH_FLD_IPV4_HDR_LEN			BIT(1)
+#define NH_FLD_IPV4_TOS				BIT(2)
+#define NH_FLD_IPV4_TOTAL_LEN			BIT(3)
+#define NH_FLD_IPV4_ID				BIT(4)
+#define NH_FLD_IPV4_FLAG_D			BIT(5)
+#define NH_FLD_IPV4_FLAG_M			BIT(6)
+#define NH_FLD_IPV4_OFFSET			BIT(7)
+#define NH_FLD_IPV4_TTL				BIT(8)
+#define NH_FLD_IPV4_PROTO			BIT(9)
+#define NH_FLD_IPV4_CKSUM			BIT(10)
+#define NH_FLD_IPV4_SRC_IP			BIT(11)
+#define NH_FLD_IPV4_DST_IP			BIT(12)
+#define NH_FLD_IPV4_OPTS			BIT(13)
+#define NH_FLD_IPV4_OPTS_COUNT			BIT(14)
+#define NH_FLD_IPV4_ALL_FIELDS			(BIT(15) - 1)
+
+/* IPV6 fields */
+#define NH_FLD_IPV6_VER				BIT(0)
+#define NH_FLD_IPV6_TC				BIT(1)
+#define NH_FLD_IPV6_SRC_IP			BIT(2)
+#define NH_FLD_IPV6_DST_IP			BIT(3)
+#define NH_FLD_IPV6_NEXT_HDR			BIT(4)
+#define NH_FLD_IPV6_FL				BIT(5)
+#define NH_FLD_IPV6_HOP_LIMIT			BIT(6)
+#define NH_FLD_IPV6_ID				BIT(7)
+#define NH_FLD_IPV6_ALL_FIELDS			(BIT(8) - 1)
+
+/* ICMP fields */
+#define NH_FLD_ICMP_TYPE			BIT(0)
+#define NH_FLD_ICMP_CODE			BIT(1)
+#define NH_FLD_ICMP_CKSUM			BIT(2)
+#define NH_FLD_ICMP_ID				BIT(3)
+#define NH_FLD_ICMP_SQ_NUM			BIT(4)
+#define NH_FLD_ICMP_ALL_FIELDS			(BIT(5) - 1)
+
+/* IGMP fields */
+#define NH_FLD_IGMP_VERSION			BIT(0)
+#define NH_FLD_IGMP_TYPE			BIT(1)
+#define NH_FLD_IGMP_CKSUM			BIT(2)
+#define NH_FLD_IGMP_DATA			BIT(3)
+#define NH_FLD_IGMP_ALL_FIELDS			(BIT(4) - 1)
+
+/* TCP fields */
+#define NH_FLD_TCP_PORT_SRC			BIT(0)
+#define NH_FLD_TCP_PORT_DST			BIT(1)
+#define NH_FLD_TCP_SEQ				BIT(2)
+#define NH_FLD_TCP_ACK				BIT(3)
+#define NH_FLD_TCP_OFFSET			BIT(4)
+#define NH_FLD_TCP_FLAGS			BIT(5)
+#define NH_FLD_TCP_WINDOW			BIT(6)
+#define NH_FLD_TCP_CKSUM			BIT(7)
+#define NH_FLD_TCP_URGPTR			BIT(8)
+#define NH_FLD_TCP_OPTS				BIT(9)
+#define NH_FLD_TCP_OPTS_COUNT			BIT(10)
+#define NH_FLD_TCP_ALL_FIELDS			(BIT(11) - 1)
+
+/* UDP fields */
+#define NH_FLD_UDP_PORT_SRC			BIT(0)
+#define NH_FLD_UDP_PORT_DST			BIT(1)
+#define NH_FLD_UDP_LEN				BIT(2)
+#define NH_FLD_UDP_CKSUM			BIT(3)
+#define NH_FLD_UDP_ALL_FIELDS			(BIT(4) - 1)
+
+/* UDP-lite fields */
+#define NH_FLD_UDP_LITE_PORT_SRC		BIT(0)
+#define NH_FLD_UDP_LITE_PORT_DST		BIT(1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS		(BIT(2) - 1)
+
+/* UDP-encap-ESP fields */
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC		BIT(0)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST		BIT(1)
+#define NH_FLD_UDP_ENC_ESP_LEN			BIT(2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM		BIT(3)
+#define NH_FLD_UDP_ENC_ESP_SPI			BIT(4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM		BIT(5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS		(BIT(6) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_PORT_SRC			BIT(0)
+#define NH_FLD_SCTP_PORT_DST			BIT(1)
+#define NH_FLD_SCTP_VER_TAG			BIT(2)
+#define NH_FLD_SCTP_CKSUM			BIT(3)
+#define NH_FLD_SCTP_ALL_FIELDS			(BIT(4) - 1)
+
+/* DCCP fields */
+#define NH_FLD_DCCP_PORT_SRC			BIT(0)
+#define NH_FLD_DCCP_PORT_DST			BIT(1)
+#define NH_FLD_DCCP_ALL_FIELDS			(BIT(2) - 1)
+
+/* IPHC fields */
+#define NH_FLD_IPHC_CID				BIT(0)
+#define NH_FLD_IPHC_CID_TYPE			BIT(1)
+#define NH_FLD_IPHC_HCINDEX			BIT(2)
+#define NH_FLD_IPHC_GEN				BIT(3)
+#define NH_FLD_IPHC_D_BIT			BIT(4)
+#define NH_FLD_IPHC_ALL_FIELDS			(BIT(5) - 1)
+
+/* SCTP chunk data fields */
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE		BIT(0)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS		BIT(1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH		BIT(2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN		BIT(3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID	BIT(4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN	BIT(5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID	BIT(6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED	BIT(7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING	BIT(8)
+#define NH_FLD_SCTP_CHUNK_DATA_END		BIT(9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS	(BIT(10) - 1)
+
+/* L2TPV2 fields */
+#define NH_FLD_L2TPV2_TYPE_BIT			BIT(0)
+#define NH_FLD_L2TPV2_LENGTH_BIT		BIT(1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT		BIT(2)
+#define NH_FLD_L2TPV2_OFFSET_BIT		BIT(3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT		BIT(4)
+#define NH_FLD_L2TPV2_VERSION			BIT(5)
+#define NH_FLD_L2TPV2_LEN			BIT(6)
+#define NH_FLD_L2TPV2_TUNNEL_ID			BIT(7)
+#define NH_FLD_L2TPV2_SESSION_ID		BIT(8)
+#define NH_FLD_L2TPV2_NS			BIT(9)
+#define NH_FLD_L2TPV2_NR			BIT(10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE		BIT(11)
+#define NH_FLD_L2TPV2_FIRST_BYTE		BIT(12)
+#define NH_FLD_L2TPV2_ALL_FIELDS		(BIT(13) - 1)
+
+/* L2TPV3 fields */
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT		BIT(0)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT		BIT(1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT		BIT(2)
+#define NH_FLD_L2TPV3_CTRL_VERSION		BIT(3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH		BIT(4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL		BIT(5)
+#define NH_FLD_L2TPV3_CTRL_SENT			BIT(6)
+#define NH_FLD_L2TPV3_CTRL_RECV			BIT(7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE		BIT(8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS		(BIT(9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT		BIT(0)
+#define NH_FLD_L2TPV3_SESS_VERSION		BIT(1)
+#define NH_FLD_L2TPV3_SESS_ID			BIT(2)
+#define NH_FLD_L2TPV3_SESS_COOKIE		BIT(3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS		(BIT(4) - 1)
+
+/* PPP fields */
+#define NH_FLD_PPP_PID				BIT(0)
+#define NH_FLD_PPP_COMPRESSED			BIT(1)
+#define NH_FLD_PPP_ALL_FIELDS			(BIT(2) - 1)
+
+/* PPPoE fields */
+#define NH_FLD_PPPOE_VER			BIT(0)
+#define NH_FLD_PPPOE_TYPE			BIT(1)
+#define NH_FLD_PPPOE_CODE			BIT(2)
+#define NH_FLD_PPPOE_SID			BIT(3)
+#define NH_FLD_PPPOE_LEN			BIT(4)
+#define NH_FLD_PPPOE_SESSION			BIT(5)
+#define NH_FLD_PPPOE_PID			BIT(6)
+#define NH_FLD_PPPOE_ALL_FIELDS			(BIT(7) - 1)
+
+/* PPP-Mux fields */
+#define NH_FLD_PPPMUX_PID			BIT(0)
+#define NH_FLD_PPPMUX_CKSUM			BIT(1)
+#define NH_FLD_PPPMUX_COMPRESSED		BIT(2)
+#define NH_FLD_PPPMUX_ALL_FIELDS		(BIT(3) - 1)
+
+/* PPP-Mux sub-frame fields */
+#define NH_FLD_PPPMUX_SUBFRM_PFF		BIT(0)
+#define NH_FLD_PPPMUX_SUBFRM_LXT		BIT(1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN		BIT(2)
+#define NH_FLD_PPPMUX_SUBFRM_PID		BIT(3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID		BIT(4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS		(BIT(5) - 1)
+
+/* LLC fields */
+#define NH_FLD_LLC_DSAP				BIT(0)
+#define NH_FLD_LLC_SSAP				BIT(1)
+#define NH_FLD_LLC_CTRL				BIT(2)
+#define NH_FLD_LLC_ALL_FIELDS			(BIT(3) - 1)
+
+/* NLPID fields */
+#define NH_FLD_NLPID_NLPID			BIT(0)
+#define NH_FLD_NLPID_ALL_FIELDS			(BIT(1) - 1)
+
+/* SNAP fields */
+#define NH_FLD_SNAP_OUI				BIT(0)
+#define NH_FLD_SNAP_PID				BIT(1)
+#define NH_FLD_SNAP_ALL_FIELDS			(BIT(2) - 1)
+
+/* LLC SNAP fields */
+#define NH_FLD_LLC_SNAP_TYPE			BIT(0)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS		(BIT(1) - 1)
+
+/* ARP fields */
+#define NH_FLD_ARP_HTYPE			BIT(0)
+#define NH_FLD_ARP_PTYPE			BIT(1)
+#define NH_FLD_ARP_HLEN				BIT(2)
+#define NH_FLD_ARP_PLEN				BIT(3)
+#define NH_FLD_ARP_OPER				BIT(4)
+#define NH_FLD_ARP_SHA				BIT(5)
+#define NH_FLD_ARP_SPA				BIT(6)
+#define NH_FLD_ARP_THA				BIT(7)
+#define NH_FLD_ARP_TPA				BIT(8)
+#define NH_FLD_ARP_ALL_FIELDS			(BIT(9) - 1)
+
+/* RFC2684 fields */
+#define NH_FLD_RFC2684_LLC			BIT(0)
+#define NH_FLD_RFC2684_NLPID			BIT(1)
+#define NH_FLD_RFC2684_OUI			BIT(2)
+#define NH_FLD_RFC2684_PID			BIT(3)
+#define NH_FLD_RFC2684_VPN_OUI			BIT(4)
+#define NH_FLD_RFC2684_VPN_IDX			BIT(5)
+#define NH_FLD_RFC2684_ALL_FIELDS		(BIT(6) - 1)
+
+/* User defined fields */
+#define NH_FLD_USER_DEFINED_SRCPORT		BIT(0)
+#define NH_FLD_USER_DEFINED_PCDID		BIT(1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS		(BIT(2) - 1)
+
+/* Payload fields */
+#define NH_FLD_PAYLOAD_BUFFER			BIT(0)
+#define NH_FLD_PAYLOAD_SIZE			BIT(1)
+#define NH_FLD_MAX_FRM_SIZE			BIT(2)
+#define NH_FLD_MIN_FRM_SIZE			BIT(3)
+#define NH_FLD_PAYLOAD_TYPE			BIT(4)
+#define NH_FLD_FRAME_SIZE			BIT(5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS		(BIT(6) - 1)
+
+/* GRE fields */
+#define NH_FLD_GRE_TYPE				BIT(0)
+#define NH_FLD_GRE_ALL_FIELDS			(BIT(1) - 1)
+
+/* MINENCAP fields */
+#define NH_FLD_MINENCAP_SRC_IP			BIT(0)
+#define NH_FLD_MINENCAP_DST_IP			BIT(1)
+#define NH_FLD_MINENCAP_TYPE			BIT(2)
+#define NH_FLD_MINENCAP_ALL_FIELDS		(BIT(3) - 1)
+
+/* IPSEC AH fields */
+#define NH_FLD_IPSEC_AH_SPI			BIT(0)
+#define NH_FLD_IPSEC_AH_NH			BIT(1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS		(BIT(2) - 1)
+
+/* IPSEC ESP fields */
+#define NH_FLD_IPSEC_ESP_SPI			BIT(0)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM		BIT(1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS		(BIT(2) - 1)
+
+/* MPLS fields */
+#define NH_FLD_MPLS_LABEL_STACK			BIT(0)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS	(BIT(1) - 1)
+
+/* MACSEC fields */
+#define NH_FLD_MACSEC_SECTAG			BIT(0)
+#define NH_FLD_MACSEC_ALL_FIELDS		(BIT(1) - 1)
+
+/* GTP fields */
+#define NH_FLD_GTP_TEID				BIT(0)
+
+/* Supported protocols */
+enum net_prot {
+	NET_PROT_NONE = 0,
+	NET_PROT_PAYLOAD,
+	NET_PROT_ETH,
+	NET_PROT_VLAN,
+	NET_PROT_IPV4,
+	NET_PROT_IPV6,
+	NET_PROT_IP,
+	NET_PROT_TCP,
+	NET_PROT_UDP,
+	NET_PROT_UDP_LITE,
+	NET_PROT_IPHC,
+	NET_PROT_SCTP,
+	NET_PROT_SCTP_CHUNK_DATA,
+	NET_PROT_PPPOE,
+	NET_PROT_PPP,
+	NET_PROT_PPPMUX,
+	NET_PROT_PPPMUX_SUBFRM,
+	NET_PROT_L2TPV2,
+	NET_PROT_L2TPV3_CTRL,
+	NET_PROT_L2TPV3_SESS,
+	NET_PROT_LLC,
+	NET_PROT_LLC_SNAP,
+	NET_PROT_NLPID,
+	NET_PROT_SNAP,
+	NET_PROT_MPLS,
+	NET_PROT_IPSEC_AH,
+	NET_PROT_IPSEC_ESP,
+	NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+	NET_PROT_MACSEC,
+	NET_PROT_GRE,
+	NET_PROT_MINENCAP,
+	NET_PROT_DCCP,
+	NET_PROT_ICMP,
+	NET_PROT_IGMP,
+	NET_PROT_ARP,
+	NET_PROT_CAPWAP_DATA,
+	NET_PROT_CAPWAP_CTRL,
+	NET_PROT_RFC2684,
+	NET_PROT_ICMPV6,
+	NET_PROT_FCOE,
+	NET_PROT_FIP,
+	NET_PROT_ISCSI,
+	NET_PROT_GTP,
+	NET_PROT_USER_DEFINED_L2,
+	NET_PROT_USER_DEFINED_L3,
+	NET_PROT_USER_DEFINED_L4,
+	NET_PROT_USER_DEFINED_L5,
+	NET_PROT_USER_DEFINED_SHIM1,
+	NET_PROT_USER_DEFINED_SHIM2,
+
+	NET_PROT_DUMMY_LAST
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ *	DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ *	DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ *	DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @extract.from_parse:  Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @extract.from_hdr.prot: Any of the supported headers
+ * @extract.from_hdr.type: Defines the type of header extraction:
+ *	DPKG_FROM_HDR: use size & offset below;
+ *	DPKG_FROM_FIELD: use field, size and offset below;
+ *	DPKG_FULL_FIELD: use field below
+ * @extract.from_hdr.field: One of the supported fields (NH_FLD_)
+ * @extract.from_hdr.size: Size in bytes
+ * @extract.from_hdr.offset: Byte offset
+ * @extract.from_hdr.hdr_index: Clear for cases not listed below;
+ *	Used for protocols that may have more than a single
+ *	header, 0 indicates an outer header;
+ *	Supported protocols (possible values):
+ *	NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ *	NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ *	NET_PROT_IP (0, HDR_INDEX_LAST);
+ *	NET_PROT_IPV4 (0, HDR_INDEX_LAST);
+ *	NET_PROT_IPV6 (0, HDR_INDEX_LAST);
+ * @extract.from_data.size: Size in bytes
+ * @extract.from_data.offset: Byte offset
+ * @extract.from_parse.size: Size in bytes
+ * @extract.from_parse.offset: Byte offset
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ *		This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+	enum dpkg_extract_type type;
+	union {
+		struct {
+			enum net_prot			prot;
+			enum dpkg_extract_from_hdr_type type;
+			u32			field;
+			u8			size;
+			u8			offset;
+			u8			hdr_index;
+		} from_hdr;
+		struct {
+			u8 size;
+			u8 offset;
+		} from_data;
+		struct {
+			u8 size;
+			u8 offset;
+		} from_parse;
+	} extract;
+
+	u8		num_of_byte_masks;
+	struct dpkg_mask	masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ *				profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+	u8 num_extracts;
+	struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
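+
+/*
+ * Illustrative sketch (not taken from the driver sources): building a
+ * profile that extracts the full IPv4 source and destination addresses,
+ * e.g. as the key for hash distribution.  Only the DPKG_, NET_PROT_ and
+ * NH_FLD_ names defined in this header are used.
+ *
+ *	struct dpkg_profile_cfg kg_cfg = { 0 };
+ *
+ *	kg_cfg.num_extracts = 2;
+ *	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IPV4;
+ *	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IPV4_SRC_IP;
+ *	kg_cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
+ *	kg_cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IPV4;
+ *	kg_cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	kg_cfg.extracts[1].extract.from_hdr.field = NH_FLD_IPV4_DST_IP;
+ *
+ * The profile is then serialized with dpni_prepare_key_cfg() before being
+ * handed to the MC firmware.
+ */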
+
+#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
new file mode 100644
index 0000000..d9b6918
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+#include "dpni.h"
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR				7
+#define DPNI_VER_MINOR				0
+#define DPNI_CMD_BASE_VERSION			1
+#define DPNI_CMD_ID_OFFSET			4
+
+#define DPNI_CMD(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+
+#define DPNI_CMDID_OPEN					DPNI_CMD(0x801)
+#define DPNI_CMDID_CLOSE				DPNI_CMD(0x800)
+#define DPNI_CMDID_CREATE				DPNI_CMD(0x901)
+#define DPNI_CMDID_DESTROY				DPNI_CMD(0x900)
+#define DPNI_CMDID_GET_API_VERSION			DPNI_CMD(0xa01)
+
+#define DPNI_CMDID_ENABLE				DPNI_CMD(0x002)
+#define DPNI_CMDID_DISABLE				DPNI_CMD(0x003)
+#define DPNI_CMDID_GET_ATTR				DPNI_CMD(0x004)
+#define DPNI_CMDID_RESET				DPNI_CMD(0x005)
+#define DPNI_CMDID_IS_ENABLED				DPNI_CMD(0x006)
+
+#define DPNI_CMDID_SET_IRQ				DPNI_CMD(0x010)
+#define DPNI_CMDID_GET_IRQ				DPNI_CMD(0x011)
+#define DPNI_CMDID_SET_IRQ_ENABLE			DPNI_CMD(0x012)
+#define DPNI_CMDID_GET_IRQ_ENABLE			DPNI_CMD(0x013)
+#define DPNI_CMDID_SET_IRQ_MASK				DPNI_CMD(0x014)
+#define DPNI_CMDID_GET_IRQ_MASK				DPNI_CMD(0x015)
+#define DPNI_CMDID_GET_IRQ_STATUS			DPNI_CMD(0x016)
+#define DPNI_CMDID_CLEAR_IRQ_STATUS			DPNI_CMD(0x017)
+
+#define DPNI_CMDID_SET_POOLS				DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR			DPNI_CMD(0x20B)
+
+#define DPNI_CMDID_GET_QDID				DPNI_CMD(0x210)
+#define DPNI_CMDID_GET_TX_DATA_OFFSET			DPNI_CMD(0x212)
+#define DPNI_CMDID_GET_LINK_STATE			DPNI_CMD(0x215)
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH			DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH			DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG				DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING			DPNI_CMD(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC			DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC			DPNI_CMD(0x221)
+#define DPNI_CMDID_SET_UNICAST_PROMISC			DPNI_CMD(0x222)
+#define DPNI_CMDID_GET_UNICAST_PROMISC			DPNI_CMD(0x223)
+#define DPNI_CMDID_SET_PRIM_MAC				DPNI_CMD(0x224)
+#define DPNI_CMDID_GET_PRIM_MAC				DPNI_CMD(0x225)
+#define DPNI_CMDID_ADD_MAC_ADDR				DPNI_CMD(0x226)
+#define DPNI_CMDID_REMOVE_MAC_ADDR			DPNI_CMD(0x227)
+#define DPNI_CMDID_CLR_MAC_FILTERS			DPNI_CMD(0x228)
+
+#define DPNI_CMDID_SET_RX_TC_DIST			DPNI_CMD(0x235)
+
+#define DPNI_CMDID_ADD_FS_ENT				DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT			DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT				DPNI_CMD(0x246)
+
+#define DPNI_CMDID_GET_STATISTICS			DPNI_CMD(0x25D)
+#define DPNI_CMDID_GET_QUEUE				DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE				DPNI_CMD(0x260)
+#define DPNI_CMDID_GET_TAILDROP				DPNI_CMD(0x261)
+#define DPNI_CMDID_SET_TAILDROP				DPNI_CMD(0x262)
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR			DPNI_CMD(0x263)
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT			DPNI_CMD(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT			DPNI_CMD(0x265)
+
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE		DPNI_CMD(0x266)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION		DPNI_CMD(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION		DPNI_CMD(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP			DPNI_CMD(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP			DPNI_CMD(0x26A)
+#define DPNI_CMDID_GET_OFFLOAD				DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD				DPNI_CMD(0x26C)
+
+#define DPNI_CMDID_SET_RX_FS_DIST			DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST			DPNI_CMD(0x274)
+#define DPNI_CMDID_GET_LINK_CFG				DPNI_CMD(0x278)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPNI_MASK(field)	\
+	GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
+		DPNI_##field##_SHIFT)
+
+#define dpni_set_field(var, field, val)	\
+	((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
+#define dpni_get_field(var, field)	\
+	(((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
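+
+/* Worked example: with DPNI_ERROR_ACTION_SHIFT = 0 and
+ * DPNI_ERROR_ACTION_SIZE = 4 (defined below), DPNI_MASK(ERROR_ACTION) is
+ * GENMASK(3, 0) = 0xf, so dpni_set_field(flags, ERROR_ACTION, 2) ORs 0x2
+ * into bits 3..0 of 'flags', and dpni_get_field(flags, ERROR_ACTION) reads
+ * the same 4-bit value back.
+ */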
+
+struct dpni_cmd_open {
+	__le32 dpni_id;
+};
+
+#define DPNI_BACKUP_POOL(val, order)	(((val) & 0x1) << (order))
+struct dpni_cmd_set_pools {
+	/* cmd word 0 */
+	u8 num_dpbp;
+	u8 backup_pool_mask;
+	__le16 pad;
+	/* cmd word 0..4 */
+	__le32 dpbp_id[DPNI_MAX_DPBP];
+	/* cmd word 4..6 */
+	__le16 buffer_size[DPNI_MAX_DPBP];
+};
+
+/* The enable indication is always the least significant bit */
+#define DPNI_ENABLE_SHIFT		0
+#define DPNI_ENABLE_SIZE		1
+
+struct dpni_rsp_is_enabled {
+	u8 enabled;
+};
+
+struct dpni_rsp_get_irq {
+	/* response word 0 */
+	__le32 irq_val;
+	__le32 pad;
+	/* response word 1 */
+	__le64 irq_addr;
+	/* response word 2 */
+	__le32 irq_num;
+	__le32 type;
+};
+
+struct dpni_cmd_set_irq_enable {
+	u8 enable;
+	u8 pad[3];
+	u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_enable {
+	__le32 pad;
+	u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_enable {
+	u8 enabled;
+};
+
+struct dpni_cmd_set_irq_mask {
+	__le32 mask;
+	u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_mask {
+	__le32 pad;
+	u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_mask {
+	__le32 mask;
+};
+
+struct dpni_cmd_get_irq_status {
+	__le32 status;
+	u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_status {
+	__le32 status;
+};
+
+struct dpni_cmd_clear_irq_status {
+	__le32 status;
+	u8 irq_index;
+};
+
+struct dpni_rsp_get_attr {
+	/* response word 0 */
+	__le32 options;
+	u8 num_queues;
+	u8 num_tcs;
+	u8 mac_filter_entries;
+	u8 pad0;
+	/* response word 1 */
+	u8 vlan_filter_entries;
+	u8 pad1;
+	u8 qos_entries;
+	u8 pad2;
+	__le16 fs_entries;
+	__le16 pad3;
+	/* response word 2 */
+	u8 qos_key_size;
+	u8 fs_key_size;
+	__le16 wriop_version;
+};
+
+#define DPNI_ERROR_ACTION_SHIFT		0
+#define DPNI_ERROR_ACTION_SIZE		4
+#define DPNI_FRAME_ANN_SHIFT		4
+#define DPNI_FRAME_ANN_SIZE		1
+
+struct dpni_cmd_set_errors_behavior {
+	__le32 errors;
+	/* from least significant bit: error_action:4, set_frame_annotation:1 */
+	u8 flags;
+};
+
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
+ * buffer layouts, but they all share the same parameters.
+ * If one of them changes, the structure below needs to be split.
+ */
+
+#define DPNI_PASS_TS_SHIFT		0
+#define DPNI_PASS_TS_SIZE		1
+#define DPNI_PASS_PR_SHIFT		1
+#define DPNI_PASS_PR_SIZE		1
+#define DPNI_PASS_FS_SHIFT		2
+#define DPNI_PASS_FS_SIZE		1
+
+struct dpni_cmd_get_buffer_layout {
+	u8 qtype;
+};
+
+struct dpni_rsp_get_buffer_layout {
+	/* response word 0 */
+	u8 pad0[6];
+	/* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+	u8 flags;
+	u8 pad1;
+	/* response word 1 */
+	__le16 private_data_size;
+	__le16 data_align;
+	__le16 head_room;
+	__le16 tail_room;
+};
+
+struct dpni_cmd_set_buffer_layout {
+	/* cmd word 0 */
+	u8 qtype;
+	u8 pad0[3];
+	__le16 options;
+	/* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+	u8 flags;
+	u8 pad1;
+	/* cmd word 1 */
+	__le16 private_data_size;
+	__le16 data_align;
+	__le16 head_room;
+	__le16 tail_room;
+};
+
+struct dpni_cmd_set_offload {
+	u8 pad[3];
+	u8 dpni_offload;
+	__le32 config;
+};
+
+struct dpni_cmd_get_offload {
+	u8 pad[3];
+	u8 dpni_offload;
+};
+
+struct dpni_rsp_get_offload {
+	__le32 pad;
+	__le32 config;
+};
+
+struct dpni_cmd_get_qdid {
+	u8 qtype;
+};
+
+struct dpni_rsp_get_qdid {
+	__le16 qdid;
+};
+
+struct dpni_rsp_get_tx_data_offset {
+	__le16 data_offset;
+};
+
+struct dpni_cmd_get_statistics {
+	u8 page_number;
+};
+
+struct dpni_rsp_get_statistics {
+	__le64 counter[DPNI_STATISTICS_CNT];
+};
+
+struct dpni_cmd_link_cfg {
+	/* cmd word 0 */
+	__le64 pad0;
+	/* cmd word 1 */
+	__le32 rate;
+	__le32 pad1;
+	/* cmd word 2 */
+	__le64 options;
+};
+
+#define DPNI_LINK_STATE_SHIFT		0
+#define DPNI_LINK_STATE_SIZE		1
+
+struct dpni_rsp_get_link_state {
+	/* response word 0 */
+	__le32 pad0;
+	/* from LSB: up:1 */
+	u8 flags;
+	u8 pad1[3];
+	/* response word 1 */
+	__le32 rate;
+	__le32 pad2;
+	/* response word 2 */
+	__le64 options;
+};
+
+struct dpni_cmd_set_max_frame_length {
+	__le16 max_frame_length;
+};
+
+struct dpni_rsp_get_max_frame_length {
+	__le16 max_frame_length;
+};
+
+struct dpni_cmd_set_multicast_promisc {
+	u8 enable;
+};
+
+struct dpni_rsp_get_multicast_promisc {
+	u8 enabled;
+};
+
+struct dpni_cmd_set_unicast_promisc {
+	u8 enable;
+};
+
+struct dpni_rsp_get_unicast_promisc {
+	u8 enabled;
+};
+
+struct dpni_cmd_set_primary_mac_addr {
+	__le16 pad;
+	u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_primary_mac_addr {
+	__le16 pad;
+	u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_port_mac_addr {
+	__le16 pad;
+	u8 mac_addr[6];
+};
+
+struct dpni_cmd_add_mac_addr {
+	__le16 pad;
+	u8 mac_addr[6];
+};
+
+struct dpni_cmd_remove_mac_addr {
+	__le16 pad;
+	u8 mac_addr[6];
+};
+
+#define DPNI_UNICAST_FILTERS_SHIFT	0
+#define DPNI_UNICAST_FILTERS_SIZE	1
+#define DPNI_MULTICAST_FILTERS_SHIFT	1
+#define DPNI_MULTICAST_FILTERS_SIZE	1
+
+struct dpni_cmd_clear_mac_filters {
+	/* from LSB: unicast:1, multicast:1 */
+	u8 flags;
+};
+
+#define DPNI_DIST_MODE_SHIFT		0
+#define DPNI_DIST_MODE_SIZE		4
+#define DPNI_MISS_ACTION_SHIFT		4
+#define DPNI_MISS_ACTION_SIZE		4
+
+struct dpni_cmd_set_rx_tc_dist {
+	/* cmd word 0 */
+	__le16 dist_size;
+	u8 tc_id;
+	/* from LSB: dist_mode:4, miss_action:4 */
+	u8 flags;
+	__le16 pad0;
+	__le16 default_flow_id;
+	/* cmd word 1..5 */
+	__le64 pad1[5];
+	/* cmd word 6 */
+	__le64 key_cfg_iova;
+};
+
+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
+ * key_cfg_iova)
+ */
+struct dpni_mask_cfg {
+	u8 mask;
+	u8 offset;
+};
+
+#define DPNI_EFH_TYPE_SHIFT		0
+#define DPNI_EFH_TYPE_SIZE		4
+#define DPNI_EXTRACT_TYPE_SHIFT		0
+#define DPNI_EXTRACT_TYPE_SIZE		4
+
+struct dpni_dist_extract {
+	/* word 0 */
+	u8 prot;
+	/* EFH type stored in the 4 least significant bits */
+	u8 efh_type;
+	u8 size;
+	u8 offset;
+	__le32 field;
+	/* word 1 */
+	u8 hdr_index;
+	u8 constant;
+	u8 num_of_repeats;
+	u8 num_of_byte_masks;
+	/* Extraction type is stored in the 4 LSBs */
+	u8 extract_type;
+	u8 pad[3];
+	/* word 2 */
+	struct dpni_mask_cfg masks[4];
+};
+
+struct dpni_ext_set_rx_tc_dist {
+	/* extension word 0 */
+	u8 num_extracts;
+	u8 pad[7];
+	/* words 1..25 */
+	struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+struct dpni_cmd_get_queue {
+	u8 qtype;
+	u8 tc;
+	u8 index;
+};
+
+#define DPNI_DEST_TYPE_SHIFT		0
+#define DPNI_DEST_TYPE_SIZE		4
+#define DPNI_STASH_CTRL_SHIFT		6
+#define DPNI_STASH_CTRL_SIZE		1
+#define DPNI_HOLD_ACTIVE_SHIFT		7
+#define DPNI_HOLD_ACTIVE_SIZE		1
+
+struct dpni_rsp_get_queue {
+	/* response word 0 */
+	__le64 pad0;
+	/* response word 1 */
+	__le32 dest_id;
+	__le16 pad1;
+	u8 dest_prio;
+	/* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
+	u8 flags;
+	/* response word 2 */
+	__le64 flc;
+	/* response word 3 */
+	__le64 user_context;
+	/* response word 4 */
+	__le32 fqid;
+	__le16 qdbin;
+};
+
+struct dpni_cmd_set_queue {
+	/* cmd word 0 */
+	u8 qtype;
+	u8 tc;
+	u8 index;
+	u8 options;
+	__le32 pad0;
+	/* cmd word 1 */
+	__le32 dest_id;
+	__le16 pad1;
+	u8 dest_prio;
+	u8 flags;
+	/* cmd word 2 */
+	__le64 flc;
+	/* cmd word 3 */
+	__le64 user_context;
+};
+
+struct dpni_cmd_set_taildrop {
+	/* cmd word 0 */
+	u8 congestion_point;
+	u8 qtype;
+	u8 tc;
+	u8 index;
+	__le32 pad0;
+	/* cmd word 1 */
+	/* Only least significant bit is relevant */
+	u8 enable;
+	u8 pad1;
+	u8 units;
+	u8 pad2;
+	__le32 threshold;
+};
+
+struct dpni_cmd_get_taildrop {
+	u8 congestion_point;
+	u8 qtype;
+	u8 tc;
+	u8 index;
+};
+
+struct dpni_rsp_get_taildrop {
+	/* cmd word 0 */
+	__le64 pad0;
+	/* cmd word 1 */
+	/* only least significant bit is relevant */
+	u8 enable;
+	u8 pad1;
+	u8 units;
+	u8 pad2;
+	__le32 threshold;
+};
+
+struct dpni_rsp_get_api_version {
+	__le16 major;
+	__le16 minor;
+};
+
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT	0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE	1
+struct dpni_cmd_set_rx_fs_dist {
+	__le16 dist_size;
+	u8 enable;
+	u8 tc;
+	__le16 miss_flow_id;
+	__le16 pad;
+	__le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT	0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE	1
+struct dpni_cmd_set_rx_hash_dist {
+	__le16 dist_size;
+	u8 enable;
+	u8 tc;
+	__le32 pad;
+	__le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+	/* cmd word 0 */
+	__le16 options;
+	u8 tc_id;
+	u8 key_size;
+	__le16 index;
+	__le16 flow_id;
+	/* cmd word 1 */
+	__le64 key_iova;
+	/* cmd word 2 */
+	__le64 mask_iova;
+	/* cmd word 3 */
+	__le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+	/* cmd word 0 */
+	__le16 pad0;
+	u8 tc_id;
+	u8 key_size;
+	__le32 pad1;
+	/* cmd word 1 */
+	__le64 key_iova;
+	/* cmd word 2 */
+	__le64 mask_iova;
+};
+
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
new file mode 100644
index 0000000..dd54e69
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -0,0 +1,1788 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fsl/mc.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+/**
+ * dpni_prepare_key_cfg() - prepare the extract parameters
+ * @cfg: Key Generation profile (rule) definition
+ * @key_cfg_buf: Zeroed 256-byte buffer, to be mapped to DMA by the caller
+ *
+ * This function has to be called before the following functions:
+ *	- dpni_set_rx_tc_dist()
+ *	- dpni_set_qos_table()
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
+{
+	int i, j;
+	struct dpni_ext_set_rx_tc_dist *dpni_ext;
+	struct dpni_dist_extract *extr;
+
+	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+		return -EINVAL;
+
+	dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+	dpni_ext->num_extracts = cfg->num_extracts;
+
+	for (i = 0; i < cfg->num_extracts; i++) {
+		extr = &dpni_ext->extracts[i];
+
+		switch (cfg->extracts[i].type) {
+		case DPKG_EXTRACT_FROM_HDR:
+			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+			dpni_set_field(extr->efh_type, EFH_TYPE,
+				       cfg->extracts[i].extract.from_hdr.type);
+			extr->size = cfg->extracts[i].extract.from_hdr.size;
+			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+			extr->field = cpu_to_le32(
+				cfg->extracts[i].extract.from_hdr.field);
+			extr->hdr_index =
+				cfg->extracts[i].extract.from_hdr.hdr_index;
+			break;
+		case DPKG_EXTRACT_FROM_DATA:
+			extr->size = cfg->extracts[i].extract.from_data.size;
+			extr->offset =
+				cfg->extracts[i].extract.from_data.offset;
+			break;
+		case DPKG_EXTRACT_FROM_PARSE:
+			extr->size = cfg->extracts[i].extract.from_parse.size;
+			extr->offset =
+				cfg->extracts[i].extract.from_parse.offset;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+		dpni_set_field(extr->extract_type, EXTRACT_TYPE,
+			       cfg->extracts[i].type);
+
+		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+			extr->masks[j].offset =
+				cfg->extracts[i].masks[j].offset;
+		}
+	}
+
+	return 0;
+}
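+
+/* Illustrative usage sketch (not part of the driver): the caller provides a
+ * zeroed 256-byte buffer, serializes the profile into it and maps it for DMA
+ * before issuing the distribution command.  'dev', 'kg_cfg' and the
+ * key_cfg_iova member of the distribution config are assumed from the
+ * caller's context and from dpni.h, respectively.
+ *
+ *	u8 *key_buf = kzalloc(256, GFP_KERNEL);
+ *	dma_addr_t key_iova;
+ *	int err;
+ *
+ *	if (!key_buf)
+ *		return -ENOMEM;
+ *	err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
+ *	if (err)
+ *		goto free_buf;
+ *	key_iova = dma_map_single(dev, key_buf, 256, DMA_TO_DEVICE);
+ *	...set key_cfg_iova = key_iova in the dpni_set_rx_tc_dist() or
+ *	dpni_set_rx_hash_dist() configuration, send the command, then
+ *	dma_unmap_single() and kfree() the buffer...
+ */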
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id:	DPNI unique ID
+ * @token:	Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+	      u32 cmd_flags,
+	      int dpni_id,
+	      u16 *token)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_open *cmd_params;
+
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+					  cmd_flags,
+					  0);
+	cmd_params = (struct dpni_cmd_open *)cmd.params;
+	cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	*token = mc_cmd_hdr_read_token(&cmd);
+
+	return 0;
+}
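+
+/* Illustrative sketch of the open/use/close pattern followed by callers
+ * ('mc_io' and 'dpni_id' come from the caller's probe context):
+ *
+ *	u16 token;
+ *	int err;
+ *
+ *	err = dpni_open(mc_io, 0, dpni_id, &token);
+ *	if (err)
+ *		return err;
+ *	...issue dpni_* commands using 'token', e.g. dpni_reset(mc_io, 0, token)...
+ *	dpni_close(mc_io, 0, token);
+ */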
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	Buffer pools configuration
+ *
+ * Mandatory for DPNI operation.
+ * Warning: allowed only when DPNI is disabled
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   u16 token,
+		   const struct dpni_pools_cfg *cfg)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_pools *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
+	cmd_params->num_dpbp = cfg->num_dpbp;
+	for (i = 0; i < DPNI_MAX_DPBP; i++) {
+		cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+		cmd_params->buffer_size[i] =
+			cpu_to_le16(cfg->pools[i].buffer_size);
+		cmd_params->backup_pool_mask |=
+			DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+	}
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
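+
+/* Illustrative sketch (not part of the driver): attaching a single buffer
+ * pool while the DPNI is still disabled.  The dpni_pools_cfg layout is the
+ * one implied by the code above (num_dpbp, pools[].dpbp_id,
+ * pools[].buffer_size, pools[].backup_pool); 'mc_io', 'token', 'bp_id' and
+ * 'rx_buf_size' stand for caller-provided values.
+ *
+ *	struct dpni_pools_cfg pools_params = { 0 };
+ *	int err;
+ *
+ *	pools_params.num_dpbp = 1;
+ *	pools_params.pools[0].dpbp_id = bp_id;
+ *	pools_params.pools[0].buffer_size = rx_buf_size;
+ *	pools_params.pools[0].backup_pool = 0;
+ *	err = dpni_set_pools(mc_io, 0, token, &pools_params);
+ */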
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:		Token of DPNI object
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @en:		Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    int *en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_is_enabled *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @irq_index:	The interrupt index to configure
+ * @en:		Interrupt state: - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes.  The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no cause can
+ * trigger an interrupt.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_irq_enable *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+	dpni_set_field(cmd_params->enable, ENABLE, en);
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @irq_index:	The interrupt index to configure
+ * @en:		Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 *en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_irq_enable *cmd_params;
+	struct dpni_rsp_get_irq_enable *rsp_params;
+
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
+	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpni_set_irq_mask() - Set interrupt mask.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @irq_index:	The interrupt index to configure
+ * @mask:	event mask to trigger interrupt;
+ *			each bit:
+ *				0 = ignore event
+ *				1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 mask)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_irq_mask *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
+	cmd_params->mask = cpu_to_le32(mask);
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_mask() - Get interrupt mask.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @irq_index:	The interrupt index to configure
+ * @mask:	Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 *mask)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_irq_mask *cmd_params;
+	struct dpni_rsp_get_irq_mask *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
+	*mask = le32_to_cpu(rsp_params->mask);
+
+	return 0;
+}
+
+/**
+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @irq_index:	The interrupt index to configure
+ * @status:	Returned interrupts status - one bit per cause:
+ *			0 = no interrupt pending
+ *			1 = interrupt pending
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u32 *status)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_irq_status *cmd_params;
+	struct dpni_rsp_get_irq_status *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
+	cmd_params->status = cpu_to_le32(*status);
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
+	*status = le32_to_cpu(rsp_params->status);
+
+	return 0;
+}
+
+/**
+ * dpni_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @irq_index:	The interrupt index to configure
+ * @status:	bits to clear (W1C) - one bit per cause:
+ *			0 = don't change
+ *			1 = clear status bit
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u32 status)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_clear_irq_status *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
+	cmd_params->irq_index = irq_index;
+	cmd_params->status = cpu_to_le32(status);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
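+
+/* Illustrative sketch of the interrupt cause workflow built from the
+ * commands above.  'irq_index' and 'cause_mask' are placeholders; the real
+ * DPNI_IRQ_* cause definitions live in dpni.h.
+ *
+ * Setup:
+ *	dpni_set_irq_mask(mc_io, 0, token, irq_index, cause_mask);
+ *	dpni_set_irq_enable(mc_io, 0, token, irq_index, 1);
+ *
+ * Interrupt handler:
+ *	u32 status = ~0;	// sent in the command (see above), here
+ *				// covering every cause
+ *
+ *	dpni_get_irq_status(mc_io, 0, token, irq_index, &status);
+ *	...handle the asserted causes...
+ *	dpni_clear_irq_status(mc_io, 0, token, irq_index, status);
+ */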
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @attr:	Object's attributes
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dpni_attr *attr)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_attr *rsp_params;
+
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
+	attr->options = le32_to_cpu(rsp_params->options);
+	attr->num_queues = rsp_params->num_queues;
+	attr->num_tcs = rsp_params->num_tcs;
+	attr->mac_filter_entries = rsp_params->mac_filter_entries;
+	attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+	attr->qos_entries = rsp_params->qos_entries;
+	attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
+	attr->qos_key_size = rsp_params->qos_key_size;
+	attr->fs_key_size = rsp_params->fs_key_size;
+	attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
+
+	return 0;
+}
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     struct dpni_error_cfg *cfg)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_errors_behavior *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
+	cmd_params->errors = cpu_to_le32(cfg->errors);
+	dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
+	dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue to retrieve configuration for
+ * @layout:	Returns buffer layout attributes
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   enum dpni_queue_type qtype,
+			   struct dpni_buffer_layout *layout)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_buffer_layout *cmd_params;
+	struct dpni_rsp_get_buffer_layout *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
+	cmd_params->qtype = qtype;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
+	layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
+	layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
+	layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+	layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
+	layout->data_align = le16_to_cpu(rsp_params->data_align);
+	layout->data_head_room = le16_to_cpu(rsp_params->head_room);
+	layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
+
+	return 0;
+}
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue this configuration applies to
+ * @layout:	Buffer layout configuration
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ *
+ * @warning	Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   enum dpni_queue_type qtype,
+			   const struct dpni_buffer_layout *layout)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_buffer_layout *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
+	cmd_params->qtype = qtype;
+	cmd_params->options = cpu_to_le16(layout->options);
+	dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
+	dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
+	dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+	cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
+	cmd_params->data_align = cpu_to_le16(layout->data_align);
+	cmd_params->head_room = cpu_to_le16(layout->data_head_room);
+	cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
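+
+/* Illustrative sketch (not part of the driver): requesting frame status and
+ * a specific data alignment for the Rx buffer layout while the DPNI is
+ * disabled.  DPNI_QUEUE_RX and the DPNI_BUF_LAYOUT_OPT_* option flags are
+ * assumed to come from dpni.h.
+ *
+ *	struct dpni_buffer_layout layout = { 0 };
+ *	int err;
+ *
+ *	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ *			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
+ *	layout.pass_frame_status = true;
+ *	layout.data_align = 64;
+ *	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
+ */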
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @type:	Type of DPNI offload
+ * @config:	Offload configuration.
+ *		For checksum offloads, non-zero value enables the offload
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ *
+ * @warning    Allowed only when DPNI is disabled
+ */
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     enum dpni_offload type,
+		     u32 config)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_offload *cmd_params;
+
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
+	cmd_params->dpni_offload = type;
+	cmd_params->config = cpu_to_le32(config);
+
+	return mc_send_command(mc_io, &cmd);
+}
+
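+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @type:	Type of DPNI offload
+ * @config:	Returned offload configuration.
+ *		For checksum offloads, a non-zero value indicates the
+ *		offload is enabled
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */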
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     enum dpni_offload type,
+		     u32 *config)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_offload *cmd_params;
+	struct dpni_rsp_get_offload *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
+	cmd_params->dpni_offload = type;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
+	*config = le32_to_cpu(rsp_params->config);
+
+	return 0;
+}
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ *			for enqueue operations
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue to receive QDID for
+ * @qdid:	Returned virtual QDID value that should be used as an argument
+ *			in all enqueue operations
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  enum dpni_queue_type qtype,
+		  u16 *qdid)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_qdid *cmd_params;
+	struct dpni_rsp_get_qdid *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
+	cmd_params->qtype = qtype;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
+	*qdid = le16_to_cpu(rsp_params->qdid);
+
+	return 0;
+}
+
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    u16 *data_offset)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_tx_data_offset *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
+	*data_offset = le16_to_cpu(rsp_params->data_offset);
+
+	return 0;
+}
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	Link configuration
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      const struct dpni_link_cfg *cfg)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_link_cfg *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_link_cfg *)cmd.params;
+	cmd_params->rate = cpu_to_le32(cfg->rate);
+	cmd_params->options = cpu_to_le64(cfg->options);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_link_cfg() - return the link configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg:	Link configuration from dpni object
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      struct dpni_link_cfg *cfg)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_link_cfg *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_cmd_link_cfg *)cmd.params;
+	cfg->rate = le32_to_cpu(rsp_params->rate);
+	cfg->options = le64_to_cpu(rsp_params->options);
+
+	return err;
+}
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @state:	Returned link state;
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dpni_link_state *state)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_link_state *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
+	state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+	state->rate = le32_to_cpu(rsp_params->rate);
+	state->options = le64_to_cpu(rsp_params->options);
+
+	return 0;
+}
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @max_frame_length:	Maximum received frame length (in
+ *				bytes); frame is discarded if its
+ *				length exceeds this value
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 max_frame_length)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_max_frame_length *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
+	cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @max_frame_length:	Maximum received frame length (in
+ *				bytes); frame is discarded if its
+ *				length exceeds this value
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 *max_frame_length)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_max_frame_length *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
+	*max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
+
+	return 0;
+}
+
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @en:		Set to '1' to enable; '0' to disable
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+			       u32 cmd_flags,
+			       u16 token,
+			       int en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_multicast_promisc *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
+	dpni_set_field(cmd_params->enable, ENABLE, en);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @en:		Returns '1' if enabled; '0' otherwise
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+			       u32 cmd_flags,
+			       u16 token,
+			       int *en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_multicast_promisc *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
+	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @en:		Set to '1' to enable; '0' to disable
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     int en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_unicast_promisc *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+	dpni_set_field(cmd_params->enable, ENABLE, en);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @en:		Returns '1' if enabled; '0' otherwise
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     int *en)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_unicast_promisc *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
+	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @mac_addr:	MAC address to set as primary address
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      const u8 mac_addr[6])
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_primary_mac_addr *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
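+
+/* Note on byte order, which applies to the other MAC address commands below
+ * as well: the loop above stores the address in reverse order compared to
+ * the u8 mac_addr[6] argument, so aa:bb:cc:dd:ee:ff is written to
+ * cmd_params->mac_addr[] as { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa }.
+ */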
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @mac_addr:	Returned MAC address
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u8 mac_addr[6])
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_primary_mac_addr *rsp_params;
+	int i, err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
+	for (i = 0; i < 6; i++)
+		mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+	return 0;
+}
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve the MAC address associated with the
+ *			physical port the DPNI is attached to
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @mac_addr:	MAC address of the physical port, if any, otherwise 0
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u8 mac_addr[6])
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_rsp_get_port_mac_addr *rsp_params;
+	int i, err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
+	for (i = 0; i < 6; i++)
+		mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+	return 0;
+}
+
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @mac_addr:	MAC address to add
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      const u8 mac_addr[6])
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_add_mac_addr *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @mac_addr:	MAC address to remove
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 const u8 mac_addr[6])
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_remove_mac_addr *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @unicast:	Set to '1' to clear unicast addresses
+ * @multicast:	Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   int unicast,
+			   int multicast)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_clear_mac_filters *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+	dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+	dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @cfg:	Traffic class distribution configuration
+ *
+ * Warning: if 'dist_mode != DPNI_DIST_MODE_NONE', dpni_prepare_key_cfg() must
+ *			be called first to prepare the key_cfg_iova parameter
+ *
+ * Return:	'0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 tc_id,
+			const struct dpni_rx_tc_dist_cfg *cfg)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+	cmd_params->tc_id = tc_id;
+	dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
+	dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
+	cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue - all queue types are supported, although
+ *		the command is ignored for Tx
+ * @tc:		Traffic class, in range 0 to NUM_TCS - 1
+ * @index:	Selects the specific queue out of the set allocated for the
+ *		same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @options:	A combination of DPNI_QUEUE_OPT_ values that control what
+ *		configuration options are set on the queue
+ * @queue:	Queue structure
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   u16 token,
+		   enum dpni_queue_type qtype,
+		   u8 tc,
+		   u8 index,
+		   u8 options,
+		   const struct dpni_queue *queue)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_queue *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
+	cmd_params->qtype = qtype;
+	cmd_params->tc = tc;
+	cmd_params->index = index;
+	cmd_params->options = options;
+	cmd_params->dest_id = cpu_to_le32(queue->destination.id);
+	cmd_params->dest_prio = queue->destination.priority;
+	dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
+	dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
+	dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
+		       queue->destination.hold_active);
+	cmd_params->flc = cpu_to_le64(queue->flc.value);
+	cmd_params->user_context = cpu_to_le64(queue->user_context);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
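+
+/*
+ * Illustrative sketch only (not a call site added by this patch): assuming
+ * 'mc_io' and 'token' were obtained via dpni_open(), 'dpcon_id' names an
+ * already-created DPCON channel and 'my_fq' is a driver-private object, a
+ * caller could attach Rx queue 0 of traffic class 0 to that channel and tag
+ * it with a driver cookie roughly like this:
+ *
+ *	struct dpni_queue queue = { 0 };
+ *	int err;
+ *
+ *	queue.destination.id = dpcon_id;
+ *	queue.destination.type = DPNI_DEST_DPCON;
+ *	queue.destination.priority = 0;
+ *	queue.user_context = (u64)(uintptr_t)my_fq;
+ *	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
+ *			     DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
+ *			     &queue);
+ */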
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @qtype:	Type of queue - all queue types are supported
+ * @tc:		Traffic class, in range 0 to NUM_TCS - 1
+ * @index:	Selects the specific queue out of the set allocated for the
+ *		same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @queue:	Queue configuration structure
+ * @qid:	Queue identification
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   u16 token,
+		   enum dpni_queue_type qtype,
+		   u8 tc,
+		   u8 index,
+		   struct dpni_queue *queue,
+		   struct dpni_queue_id *qid)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_queue *cmd_params;
+	struct dpni_rsp_get_queue *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
+	cmd_params->qtype = qtype;
+	cmd_params->tc = tc;
+	cmd_params->index = index;
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
+	queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+	queue->destination.priority = rsp_params->dest_prio;
+	queue->destination.type = dpni_get_field(rsp_params->flags,
+						 DEST_TYPE);
+	queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+						  STASH_CTRL);
+	queue->destination.hold_active = dpni_get_field(rsp_params->flags,
+							HOLD_ACTIVE);
+	queue->flc.value = le64_to_cpu(rsp_params->flc);
+	queue->user_context = le64_to_cpu(rsp_params->user_context);
+	qid->fqid = le32_to_cpu(rsp_params->fqid);
+	qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
+	return 0;
+}
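+
+/*
+ * Illustrative sketch (hypothetical caller, not added by this patch): the
+ * hardware frame queue ID backing a queue can be read back for use with the
+ * DPIO/QBMan enqueue path, e.g. for Tx confirmation queue 0 of TC 0:
+ *
+ *	struct dpni_queue queue;
+ *	struct dpni_queue_id qid;
+ *	u32 fqid = 0;
+ *	int err;
+ *
+ *	err = dpni_get_queue(mc_io, 0, token, DPNI_QUEUE_TX_CONFIRM,
+ *			     0, 0, &queue, &qid);
+ *	if (!err)
+ *		fqid = qid.fqid;
+ */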
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @page:	Selects the statistics page to retrieve, see
+ *		DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
+ * @stat:	Structure containing the statistics
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 page,
+			union dpni_statistics *stat)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_statistics *cmd_params;
+	struct dpni_rsp_get_statistics *rsp_params;
+	int i, err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+	cmd_params->page_number = page;
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
+	for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+		stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
+
+	return 0;
+}
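+
+/*
+ * Illustrative sketch (not a call site added by this patch): page 0 carries
+ * the ingress counters, so the total Rx frame count could be read with:
+ *
+ *	union dpni_statistics stats;
+ *	u64 rx_frames = 0;
+ *	int err;
+ *
+ *	err = dpni_get_statistics(mc_io, 0, token, 0, &stats);
+ *	if (!err)
+ *		rx_frames = stats.page_0.ingress_all_frames;
+ */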
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cg_point:	Congestion point
+ * @qtype:	Queue type on which the taildrop is configured.
+ *		Only Rx queues are supported for now
+ * @tc:		Traffic class to apply this taildrop to
+ * @index:	Index of the queue if the DPNI supports multiple queues for
+ *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop:	Taildrop structure
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type qtype,
+		      u8 tc,
+		      u8 index,
+		      struct dpni_taildrop *taildrop)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_set_taildrop *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+	cmd_params->congestion_point = cg_point;
+	cmd_params->qtype = qtype;
+	cmd_params->tc = tc;
+	cmd_params->index = index;
+	dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
+	cmd_params->units = taildrop->units;
+	cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
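+
+/*
+ * Illustrative sketch (the threshold value is an arbitrary example, and 'tc'
+ * and 'index' are assumed to select the target queue): enabling a 64 KiB
+ * byte-based taildrop on a single Rx queue could look like:
+ *
+ *	struct dpni_taildrop td = { 0 };
+ *	int err;
+ *
+ *	td.enable = 1;
+ *	td.units = DPNI_CONGESTION_UNIT_BYTES;
+ *	td.threshold = 64 * 1024;
+ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, tc, index, &td);
+ */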
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cg_point:	Congestion point
+ * @qtype:	Queue type on which the taildrop is configured.
+ *		Only Rx queues are supported for now
+ * @tc:		Traffic class to apply this taildrop to
+ * @index:	Index of the queue if the DPNI supports multiple queues for
+ *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop:	Taildrop structure
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type qtype,
+		      u8 tc,
+		      u8 index,
+		      struct dpni_taildrop *taildrop)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpni_cmd_get_taildrop *cmd_params;
+	struct dpni_rsp_get_taildrop *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+	cmd_params->congestion_point = cg_point;
+	cmd_params->qtype = qtype;
+	cmd_params->tc = tc;
+	cmd_params->index = index;
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+	taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
+	taildrop->units = rsp_params->units;
+	taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+
+	return 0;
+}
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver:	Major version of data path network interface API
+ * @minor_ver:	Minor version of data path network interface API
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver)
+{
+	struct dpni_rsp_get_api_version *rsp_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+					  cmd_flags, 0);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+	*major_ver = le16_to_cpu(rsp_params->major);
+	*minor_ver = le16_to_cpu(rsp_params->minor);
+
+	return 0;
+}
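+
+/*
+ * Illustrative sketch (DPNI_MIN_VER_MAJOR is a hypothetical driver-defined
+ * constant, not something introduced by this patch): a probe path could
+ * refuse to run against MC firmware exposing an older DPNI API:
+ *
+ *	u16 major, minor;
+ *	int err;
+ *
+ *	err = dpni_get_api_version(mc_io, 0, &major, &minor);
+ *	if (!err && major < DPNI_MIN_VER_MAJOR)
+ *		err = -EOPNOTSUPP;
+ */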
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If FS was already enabled by a previous call, the classification key is
+ * changed but all existing table rules are kept. If the existing rules do
+ * not match the new key, the results are unpredictable; it is the user's
+ * responsibility to keep the key consistent with the rules.
+ * If cfg.enable is set to 1, the command creates a flow steering table and
+ * packets are classified according to it; packets that miss all table rules
+ * are classified according to the settings made in dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command clears the flow steering table and
+ * packets are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_rx_dist_cfg *cfg)
+{
+	struct dpni_cmd_set_rx_fs_dist *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+	cmd_params->tc = cfg->tc;
+	cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx hash distribution
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If cfg.enable is set to 1, packets are classified using a hash function
+ * based on the key described by the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0, packets are sent to the default queue.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rx_dist_cfg *cfg)
+{
+	struct dpni_cmd_set_rx_hash_dist *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+	dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+	cmd_params->tc = cfg->tc;
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
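+
+/*
+ * Illustrative sketch (hypothetical caller): 'key_iova' is assumed to be the
+ * DMA address of a 256-byte buffer already filled in via
+ * dpni_prepare_key_cfg(). Hashing Rx traffic of TC 0 across 8 queues could
+ * then be enabled with:
+ *
+ *	struct dpni_rx_dist_cfg dist_cfg = { 0 };
+ *	int err;
+ *
+ *	dist_cfg.key_cfg_iova = key_iova;
+ *	dist_cfg.dist_size = 8;
+ *	dist_cfg.tc = 0;
+ *	dist_cfg.enable = 1;
+ *	err = dpni_set_rx_hash_dist(mc_io, 0, token, &dist_cfg);
+ */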
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ *			(to select a flow ID)
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @index:	Location in the FS table where to insert the entry.
+ *		Only relevant if MASKING is enabled for FS
+ *		classification on this DPNI, it is ignored for exact match.
+ * @cfg:	Flow steering rule to add
+ * @action:	Action to be taken as result of a classification hit
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 tc_id,
+		      u16 index,
+		      const struct dpni_rule_cfg *cfg,
+		      const struct dpni_fs_action_cfg *action)
+{
+	struct dpni_cmd_add_fs_entry *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->index = cpu_to_le16(index);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+	cmd_params->options = cpu_to_le16(action->options);
+	cmd_params->flow_id = cpu_to_le16(action->flow_id);
+	cmd_params->flc = cpu_to_le64(action->flc);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
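+
+/*
+ * Illustrative sketch (hypothetical caller): 'rule' is assumed to be a
+ * struct dpni_rule_cfg whose key/mask buffers already live in DMA-able
+ * memory. Steering matching frames of TC 0 to Rx flow 3 could look like:
+ *
+ *	struct dpni_fs_action_cfg act = { 0 };
+ *	int err;
+ *
+ *	act.flow_id = 3;
+ *	err = dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &act);
+ */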
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ *			    traffic class
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @cfg:	Flow steering rule to remove
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 tc_id,
+			 const struct dpni_rule_cfg *cfg)
+{
+	struct dpni_cmd_remove_fs_entry *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
new file mode 100644
index 0000000..ee0711d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -0,0 +1,969 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include "dpkg.h"
+
+struct fsl_mc_io;
+
+/**
+ * Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC				8
+/**
+ * Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP				8
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS				(u8)(-1)
+/**
+ * All flows within traffic class considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS			(u16)(-1)
+/**
+ * Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID			(u16)(-1)
+
+/**
+ * Tx traffic is always released to a buffer pool on transmit, there are no
+ * resources allocated to have the frames confirmed back to the source after
+ * transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE			0x000001
+/**
+ * Disables support for MAC address filtering for addresses other than primary
+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
+ * is disabled, only traffic matching the primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER			0x000002
+/**
+ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
+ * traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING			0x000004
+/**
+ * Congestion can be managed in several ways, allowing the buffer pool to
+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
+ * of queues. If set, it configures a single congestion groups across all TCs.
+ * If reset, a congestion group is allocated for each TC. Only relevant if the
+ * DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION		0x000008
+/**
+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
+ * variants. Setting this bit on these SoCs will trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING		0x000010
+/**
+ * Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS				0x000020
+
+int dpni_open(struct fsl_mc_io	*mc_io,
+	      u32		cmd_flags,
+	      int		dpni_id,
+	      u16		*token);
+
+int dpni_close(struct fsl_mc_io	*mc_io,
+	       u32		cmd_flags,
+	       u16		token);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ *	must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpni_pools_cfg {
+	u8		num_dpbp;
+	struct {
+		int	dpbp_id;
+		u16	buffer_size;
+		int	backup_pool;
+	} pools[DPNI_MAX_DPBP];
+};
+
+int dpni_set_pools(struct fsl_mc_io		*mc_io,
+		   u32				cmd_flags,
+		   u16				token,
+		   const struct dpni_pools_cfg	*cfg);
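+
+/*
+ * Illustrative sketch (the buffer size and 'dpbp_id' are assumptions, not
+ * values mandated by this header): seeding the DPNI with one buffer pool
+ * backed by an existing DPBP could look like:
+ *
+ *	struct dpni_pools_cfg pools_params = { 0 };
+ *	int err;
+ *
+ *	pools_params.num_dpbp = 1;
+ *	pools_params.pools[0].dpbp_id = dpbp_id;
+ *	pools_params.pools[0].backup_pool = 0;
+ *	pools_params.pools[0].buffer_size = 2048;
+ *	err = dpni_set_pools(mc_io, 0, token, &pools_params);
+ */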
+
+int dpni_enable(struct fsl_mc_io	*mc_io,
+		u32			cmd_flags,
+		u16			token);
+
+int dpni_disable(struct fsl_mc_io	*mc_io,
+		 u32			cmd_flags,
+		 u16			token);
+
+int dpni_is_enabled(struct fsl_mc_io	*mc_io,
+		    u32			cmd_flags,
+		    u16			token,
+		    int			*en);
+
+int dpni_reset(struct fsl_mc_io	*mc_io,
+	       u32		cmd_flags,
+	       u16		token);
+
+/**
+ * DPNI IRQ Index and Events
+ */
+
+/**
+ * IRQ index
+ */
+#define DPNI_IRQ_INDEX				0
+/**
+ * IRQ events:
+ *       DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state
+ *       DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint
+ */
+#define DPNI_IRQ_EVENT_LINK_CHANGED		0x00000001
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED		0x00000002
+
+int dpni_set_irq_enable(struct fsl_mc_io	*mc_io,
+			u32			cmd_flags,
+			u16			token,
+			u8			irq_index,
+			u8			en);
+
+int dpni_get_irq_enable(struct fsl_mc_io	*mc_io,
+			u32			cmd_flags,
+			u16			token,
+			u8			irq_index,
+			u8			*en);
+
+int dpni_set_irq_mask(struct fsl_mc_io	*mc_io,
+		      u32		cmd_flags,
+		      u16		token,
+		      u8		irq_index,
+		      u32		mask);
+
+int dpni_get_irq_mask(struct fsl_mc_io	*mc_io,
+		      u32		cmd_flags,
+		      u16		token,
+		      u8		irq_index,
+		      u32		*mask);
+
+int dpni_get_irq_status(struct fsl_mc_io	*mc_io,
+			u32			cmd_flags,
+			u16			token,
+			u8			irq_index,
+			u32			*status);
+
+int dpni_clear_irq_status(struct fsl_mc_io	*mc_io,
+			  u32			cmd_flags,
+			  u16			token,
+			  u8			irq_index,
+			  u32			status);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ *		DPNI_OPT_TX_FRM_RELEASE
+ *		DPNI_OPT_NO_MAC_FILTER
+ *		DPNI_OPT_HAS_POLICING
+ *		DPNI_OPT_SHARED_CONGESTION
+ *		DPNI_OPT_HAS_KEY_MASKING
+ *		DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ *		than this when adding QoS entries will result in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ *		key larger than this when composing the hash + FS key will
+ *		result in an error.
+ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
+ *		on 6, 5, 5 bits respectively.
+ */
+struct dpni_attr {
+	u32 options;
+	u8 num_queues;
+	u8 num_tcs;
+	u8 mac_filter_entries;
+	u8 vlan_filter_entries;
+	u8 qos_entries;
+	u16 fs_entries;
+	u8 qos_key_size;
+	u8 fs_key_size;
+	u16 wriop_version;
+};
+
+int dpni_get_attributes(struct fsl_mc_io	*mc_io,
+			u32			cmd_flags,
+			u16			token,
+			struct dpni_attr	*attr);
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE	0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE		0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE		0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE		0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE		0x00000004
+/**
+ * Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE		0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+	DPNI_ERROR_ACTION_DISCARD = 0,
+	DPNI_ERROR_ACTION_CONTINUE = 1,
+	DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ *		status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+	u32			errors;
+	enum dpni_error_action	error_action;
+	int			set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io		*mc_io,
+			     u32			cmd_flags,
+			     u16			token,
+			     struct dpni_error_cfg	*cfg);
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP		0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT	0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS	0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE	0x00000008
+/**
+ * Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN		0x00000010
+/**
+ * Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM	0x00000020
+/**
+ * Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM	0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ *		layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+	u32	options;
+	int	pass_timestamp;
+	int	pass_parser_result;
+	int	pass_frame_status;
+	u16	private_data_size;
+	u16	data_align;
+	u16	data_head_room;
+	u16	data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+	DPNI_QUEUE_RX,
+	DPNI_QUEUE_TX,
+	DPNI_QUEUE_TX_CONFIRM,
+	DPNI_QUEUE_RX_ERR,
+};
+
+int dpni_get_buffer_layout(struct fsl_mc_io		*mc_io,
+			   u32				cmd_flags,
+			   u16				token,
+			   enum dpni_queue_type		qtype,
+			   struct dpni_buffer_layout	*layout);
+
+int dpni_set_buffer_layout(struct fsl_mc_io		   *mc_io,
+			   u32				   cmd_flags,
+			   u16				   token,
+			   enum dpni_queue_type		   qtype,
+			   const struct dpni_buffer_layout *layout);
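+
+/*
+ * Illustrative sketch (the sizes below are arbitrary examples): requesting
+ * frame status, 64-byte data alignment and extra Rx headroom could look like:
+ *
+ *	struct dpni_buffer_layout layout = { 0 };
+ *	int err;
+ *
+ *	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ *			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ *			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ *	layout.pass_frame_status = 1;
+ *	layout.data_align = 64;
+ *	layout.data_head_room = 128;
+ *	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
+ */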
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+	DPNI_OFF_RX_L3_CSUM,
+	DPNI_OFF_RX_L4_CSUM,
+	DPNI_OFF_TX_L3_CSUM,
+	DPNI_OFF_TX_L4_CSUM,
+};
+
+int dpni_set_offload(struct fsl_mc_io	*mc_io,
+		     u32		cmd_flags,
+		     u16		token,
+		     enum dpni_offload	type,
+		     u32		config);
+
+int dpni_get_offload(struct fsl_mc_io	*mc_io,
+		     u32		cmd_flags,
+		     u16		token,
+		     enum dpni_offload	type,
+		     u32		*config);
+
+int dpni_get_qdid(struct fsl_mc_io	*mc_io,
+		  u32			cmd_flags,
+		  u16			token,
+		  enum dpni_queue_type	qtype,
+		  u16			*qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io	*mc_io,
+			    u32			cmd_flags,
+			    u16			token,
+			    u16			*data_offset);
+
+#define DPNI_STATISTICS_CNT		7
+
+/**
+ * union dpni_statistics - Union describing the DPNI statistics
+ * @page_0: Page_0 statistics structure
+ * @page_0.ingress_all_frames: Ingress frame count
+ * @page_0.ingress_all_bytes: Ingress byte count
+ * @page_0.ingress_multicast_frames: Ingress multicast frame count
+ * @page_0.ingress_multicast_bytes: Ingress multicast byte count
+ * @page_0.ingress_broadcast_frames: Ingress broadcast frame count
+ * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count
+ * @page_1: Page_1 statistics structure
+ * @page_1.egress_all_frames: Egress frame count
+ * @page_1.egress_all_bytes: Egress byte count
+ * @page_1.egress_multicast_frames: Egress multicast frame count
+ * @page_1.egress_multicast_bytes: Egress multicast byte count
+ * @page_1.egress_broadcast_frames: Egress broadcast frame count
+ * @page_1.egress_broadcast_bytes: Egress broadcast byte count
+ * @page_2: Page_2 statistics structure
+ * @page_2.ingress_filtered_frames: Ingress filtered frame count
+ * @page_2.ingress_discarded_frames: Ingress discarded frame count
+ * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to
+ *	lack of buffers
+ * @page_2.egress_discarded_frames: Egress discarded frame count
+ * @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @page_3: Page_3 statistics structure
+ * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes
+ *	dequeued from egress FQs
+ * @page_3.egress_dequeue_frames: Cumulative count of the number of frames
+ *	dequeued from egress FQs
+ * @page_3.egress_reject_bytes: Cumulative count of the number of bytes in
+ *	egress frames whose enqueue was rejected
+ * @page_3.egress_reject_frames: Cumulative count of the number of egress
+ *	frames whose enqueue was rejected
+ * @page_4: Page_4 statistics structure: congestion points
+ * @page_4.cgr_reject_frames: number of rejected frames due to congestion point
+ * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
+ * @page_5: Page_5 statistics structure: policer
+ * @page_5.policer_cnt_red: number of red colored frames
+ * @page_5.policer_cnt_yellow: number of yellow colored frames
+ * @page_5.policer_cnt_green: number of green colored frames
+ * @page_5.policer_cnt_re_red: number of recolored red frames
+ * @page_5.policer_cnt_re_yellow: number of recolored yellow frames
+ * @page_6: Page_6 statistics structure
+ * @page_6.tx_pending_frames: total number of frames pending in egress FQs
+ * @raw: raw statistics structure, used to index counters
+ */
+union dpni_statistics {
+	struct {
+		u64 ingress_all_frames;
+		u64 ingress_all_bytes;
+		u64 ingress_multicast_frames;
+		u64 ingress_multicast_bytes;
+		u64 ingress_broadcast_frames;
+		u64 ingress_broadcast_bytes;
+	} page_0;
+	struct {
+		u64 egress_all_frames;
+		u64 egress_all_bytes;
+		u64 egress_multicast_frames;
+		u64 egress_multicast_bytes;
+		u64 egress_broadcast_frames;
+		u64 egress_broadcast_bytes;
+	} page_1;
+	struct {
+		u64 ingress_filtered_frames;
+		u64 ingress_discarded_frames;
+		u64 ingress_nobuffer_discards;
+		u64 egress_discarded_frames;
+		u64 egress_confirmed_frames;
+	} page_2;
+	struct {
+		u64 egress_dequeue_bytes;
+		u64 egress_dequeue_frames;
+		u64 egress_reject_bytes;
+		u64 egress_reject_frames;
+	} page_3;
+	struct {
+		u64 cgr_reject_frames;
+		u64 cgr_reject_bytes;
+	} page_4;
+	struct {
+		u64 policer_cnt_red;
+		u64 policer_cnt_yellow;
+		u64 policer_cnt_green;
+		u64 policer_cnt_re_red;
+		u64 policer_cnt_re_yellow;
+	} page_5;
+	struct {
+		u64 tx_pending_frames;
+	} page_6;
+	struct {
+		u64 counter[DPNI_STATISTICS_CNT];
+	} raw;
+};
+
+int dpni_get_statistics(struct fsl_mc_io	*mc_io,
+			u32			cmd_flags,
+			u16			token,
+			u8			page,
+			union dpni_statistics	*stat);
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG		0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX	0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE		0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
+
+/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+	u32 rate;
+	u64 options;
+};
+
+int dpni_set_link_cfg(struct fsl_mc_io			*mc_io,
+		      u32				cmd_flags,
+		      u16				token,
+		      const struct dpni_link_cfg	*cfg);
+
+int dpni_get_link_cfg(struct fsl_mc_io			*mc_io,
+		      u32				cmd_flags,
+		      u16				token,
+		      struct dpni_link_cfg		*cfg);
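+
+/*
+ * Illustrative sketch (hypothetical caller): symmetric pause frames could be
+ * enabled with a read-modify-write of the options mask:
+ *
+ *	struct dpni_link_cfg link_cfg;
+ *	int err;
+ *
+ *	err = dpni_get_link_cfg(mc_io, 0, token, &link_cfg);
+ *	if (!err) {
+ *		link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+ *		link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ *		err = dpni_set_link_cfg(mc_io, 0, token, &link_cfg);
+ *	}
+ */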
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+	u32	rate;
+	u64	options;
+	int	up;
+};
+
+int dpni_get_link_state(struct fsl_mc_io	*mc_io,
+			u32			cmd_flags,
+			u16			token,
+			struct dpni_link_state	*state);
+
+int dpni_set_max_frame_length(struct fsl_mc_io	*mc_io,
+			      u32		cmd_flags,
+			      u16		token,
+			      u16		max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io	*mc_io,
+			      u32		cmd_flags,
+			      u16		token,
+			      u16		*max_frame_length);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+			       u32		cmd_flags,
+			       u16		token,
+			       int		en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+			       u32		cmd_flags,
+			       u16		token,
+			       int		*en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io	*mc_io,
+			     u32		cmd_flags,
+			     u16		token,
+			     int		en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io	*mc_io,
+			     u32		cmd_flags,
+			     u16		token,
+			     int		*en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+			      u32		cmd_flags,
+			      u16		token,
+			      const u8		mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io	*mc_io,
+			      u32		cmd_flags,
+			      u16		token,
+			      u8		mac_addr[6]);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io	*mc_io,
+			   u32			cmd_flags,
+			   u16			token,
+			   u8			mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io	*mc_io,
+		      u32		cmd_flags,
+		      u16		token,
+		      const u8		mac_addr[6]);
+
+int dpni_remove_mac_addr(struct fsl_mc_io	*mc_io,
+			 u32			cmd_flags,
+			 u16			token,
+			 const u8		mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io	*mc_io,
+			   u32			cmd_flags,
+			   u16			token,
+			   int			unicast,
+			   int			multicast);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ *		the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS:  Use explicit flow steering; only relevant if
+ *	 the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+	DPNI_DIST_MODE_NONE = 0,
+	DPNI_DIST_MODE_HASH = 1,
+	DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action -   DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+	DPNI_FS_MISS_DROP = 0,
+	DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+	DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+	enum dpni_fs_miss_action	miss_action;
+	u16				default_flow_id;
+};
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+			 u8 *key_cfg_buf);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ *	supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ *	112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *		the extractions to be used for the distribution key by calling
+ *		dpni_prepare_key_cfg() relevant only when
+ *		'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ *		'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+	u16			dist_size;
+	enum dpni_dist_mode	dist_mode;
+	u64			key_cfg_iova;
+	struct dpni_fs_tbl_cfg	fs_cfg;
+};
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io			*mc_io,
+			u32					cmd_flags,
+			u16					token,
+			u8					tc_id,
+			const struct dpni_rx_tc_dist_cfg	*cfg);
+
+/**
+ * When used as fs_miss_flow_id in struct dpni_rx_dist_cfg (see
+ * dpni_set_rx_fs_dist()), signals the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP		((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - Rx distribution configuration
+ * @dist_size:	distribution size
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *		the extractions to be used for the distribution key by calling
+ *		dpni_prepare_key_cfg(); relevant only when enable!=0 otherwise
+ *		it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ *		hash is disabled it will be put into this queue id; use
+ *		DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ *		used only when flow steering distribution is enabled and hash
+ *		distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+	u16 dist_size;
+	u64 key_cfg_iova;
+	u8 enable;
+	u8 tc;
+	u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rx_dist_cfg *cfg);
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ *		does not generate FQDAN notifications; user is expected to
+ *		dequeue from the queue based on polling or other user-defined
+ *		method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ *		notifications to the specified DPIO; user is expected to dequeue
+ *		from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ *		FQDAN notifications, but is connected to the specified DPCON
+ *		object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+	DPNI_DEST_NONE = 0,
+	DPNI_DEST_DPIO = 1,
+	DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_queue - Queue structure
+ * @destination - Destination structure
+ * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ *	Identifies either a DPIO or a DPCON object.
+ *	Not relevant for Tx queues.
+ * @destination.type:	May be one of the following:
+ *	0 - No destination, queue can be manually
+ *		queried, but will not push traffic or
+ *		notifications to a DPIO;
+ *	1 - The destination is a DPIO. When traffic
+ *		becomes available in the queue a FQDAN
+ *		(FQ data available notification) will be
+ *		generated to selected DPIO;
+ *	2 - The destination is a DPCON. The queue is
+ *		associated with a DPCON object for the
+ *		purpose of scheduling between multiple
+ *		queues. The DPCON may be independently
+ *		configured to generate notifications.
+ *		Not relevant for Tx queues.
+ * @destination.hold_active: Hold active, maintains a queue scheduled for longer
+ *	in a DPIO during dequeue to reduce spread of traffic.
+ *	Only relevant if queues are
+ *	not affined to a single DPIO.
+ * @user_context: User data, presented to the user along with any frames
+ *	from this queue. Not relevant for Tx queues.
+ * @flc: FD Flow Context (FLC) structure
+ * @flc.value: Default FLC value for traffic dequeued from
+ *      this queue.  Please check description of FD
+ *      structure for more information.
+ *      Note that FLC values set using dpni_add_fs_entry,
+ *      if any, take precedence over values per queue.
+ * @flc.stash_control: Boolean, indicates whether the 6 least
+ *      significant bits are used for stash control.  If set, the 6
+ *      least significant bits in value are interpreted as follows:
+ *      - bits 0-1: indicates the number of 64 byte units of context
+ *      that are stashed.  FLC value is interpreted as a memory address
+ *      in this case, excluding the 6 LS bits.
+ *      - bits 2-3: indicates the number of 64 byte units of frame
+ *      annotation to be stashed.  Annotation is placed at FD[ADDR].
+ *      - bits 4-5: indicates the number of 64 byte units of frame
+ *      data to be stashed.  Frame data is placed at FD[ADDR] +
+ *      FD[OFFSET].
+ *      For more details check the Frame Descriptor section in the
+ *      hardware documentation.
+ */
+struct dpni_queue {
+	struct {
+		u16 id;
+		enum dpni_dest type;
+		char hold_active;
+		u8 priority;
+	} destination;
+	u64 user_context;
+	struct {
+		u64 value;
+		char stash_control;
+	} flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ *			or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
+ *		for Tx queues.
+ */
+struct dpni_queue_id {
+	u32 fqid;
+	u16 qdbin;
+};
+
+/**
+ * Set User Context
+ */
+#define DPNI_QUEUE_OPT_USER_CTX		0x00000001
+#define DPNI_QUEUE_OPT_DEST		0x00000002
+#define DPNI_QUEUE_OPT_FLC		0x00000004
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE	0x00000008
+
+int dpni_set_queue(struct fsl_mc_io	*mc_io,
+		   u32			cmd_flags,
+		   u16			token,
+		   enum dpni_queue_type	qtype,
+		   u8			tc,
+		   u8			index,
+		   u8			options,
+		   const struct dpni_queue *queue);
+
+int dpni_get_queue(struct fsl_mc_io	*mc_io,
+		   u32			cmd_flags,
+		   u16			token,
+		   enum dpni_queue_type	qtype,
+		   u8			tc,
+		   u8			index,
+		   struct dpni_queue	*queue,
+		   struct dpni_queue_id	*qid);
+
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+	DPNI_CONGESTION_UNIT_BYTES = 0,
+	DPNI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * enum dpni_congestion_point - Structure representing congestion point
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ *		QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
+ *		define the DPNI this can be either per TC (default) or per
+ *		interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ *		QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+	DPNI_CP_QUEUE,
+	DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable:	Indicates whether the taildrop is active or not.
+ * @units:	Indicates the unit of THRESHOLD. Queue taildrop only supports
+ *		byte units, so this field is ignored and assumed = 0 if
+ *		CONGESTION_POINT is 0.
+ * @threshold:	Threshold value, in units identified by UNITS field. Value 0
+ *		cannot be used as a valid taildrop threshold, THRESHOLD must
+ *		be > 0 if the taildrop is enabled.
+ */
+struct dpni_taildrop {
+	char enable;
+	enum dpni_congestion_unit units;
+	u32 threshold;
+};
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type q_type,
+		      u8 tc,
+		      u8 q_index,
+		      struct dpni_taildrop *taildrop);
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type q_type,
+		      u8 tc,
+		      u8 q_index,
+		      struct dpni_taildrop *taildrop);
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+	u64	key_iova;
+	u64	mask_iova;
+	u8	key_size;
+};
+
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD            0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC            0x2
+
+/**
+ * Indicates whether the 6 lowest significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ *     - bits 0-1: indicates the number of 64 byte units of context that are
+ *     stashed. FLC value is interpreted as a memory address in this case,
+ *     excluding the 6 LS bits.
+ *     - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *     to be stashed. Annotation is placed at FD[ADDR].
+ *     - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *     stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL  0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc:	FLC value for traffic matching this rule. Please check the
+ *		Frame Descriptor section in the hardware documentation for
+ *		more information.
+ * @flow_id:	Identifies the Rx queue used for matching traffic. Supported
+ *		values are in range 0 to num_queue-1.
+ * @options:	Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+	u64 flc;
+	u16 flow_id;
+	u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 tc_id,
+		      u16 index,
+		      const struct dpni_rule_cfg *cfg,
+		      const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 tc_id,
+			 const struct dpni_rule_cfg *cfg);
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver);
+
+#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
new file mode 100644
index 0000000..4ac05bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION		1
+#define DPRTC_CMD_ID_OFFSET		4
+
+#define DPRTC_CMD(id)	(((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
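+
+/*
+ * For example, DPRTC_CMDID_OPEN below expands to DPRTC_CMD(0x810) =
+ * (0x810 << 4) | 1 = 0x8101: the upper bits carry the command ID and the
+ * low nibble carries the command base version.
+ */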
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE			DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN			DPRTC_CMD(0x810)
+
+#define DPRTC_CMDID_SET_IRQ_ENABLE		DPRTC_CMD(0x012)
+#define DPRTC_CMDID_GET_IRQ_ENABLE		DPRTC_CMD(0x013)
+#define DPRTC_CMDID_SET_IRQ_MASK		DPRTC_CMD(0x014)
+#define DPRTC_CMDID_GET_IRQ_MASK		DPRTC_CMD(0x015)
+#define DPRTC_CMDID_GET_IRQ_STATUS		DPRTC_CMD(0x016)
+#define DPRTC_CMDID_CLEAR_IRQ_STATUS		DPRTC_CMD(0x017)
+
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+	__le32 dprtc_id;
+};
+
+struct dprtc_cmd_get_irq {
+	__le32 pad;
+	u8 irq_index;
+};
+
+struct dprtc_cmd_set_irq_enable {
+	u8 en;
+	u8 pad[3];
+	u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_enable {
+	u8 en;
+};
+
+struct dprtc_cmd_set_irq_mask {
+	__le32 mask;
+	u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_mask {
+	__le32 mask;
+};
+
+struct dprtc_cmd_get_irq_status {
+	__le32 status;
+	u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_status {
+	__le32 status;
+};
+
+struct dprtc_cmd_clear_irq_status {
+	__le32 status;
+	u8 irq_index;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
new file mode 100644
index 0000000..ed52a34
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id:	DPRTC unique ID
+ * @token:	Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       int dprtc_id,
+	       u16 *token)
+{
+	struct dprtc_cmd_open *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+					  cmd_flags,
+					  0);
+	cmd_params = (struct dprtc_cmd_open *)cmd.params;
+	cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	*token = mc_cmd_hdr_read_token(&cmd);
+
+	return 0;
+}
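+
+/*
+ * Illustrative sketch (hypothetical caller): with 'mc_io' describing the MC
+ * portal and 'dprtc_id' the DPRTC object found on the fsl-mc bus, a typical
+ * open/use/close sequence would be:
+ *
+ *	u16 token;
+ *	int err;
+ *
+ *	err = dprtc_open(mc_io, 0, dprtc_id, &token);
+ *	if (err)
+ *		return err;
+ *	...
+ *	dprtc_close(mc_io, 0, token);
+ */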
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+					  token);
+
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @en:		Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes.  The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no cause will
+ * trigger an interrupt.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 en)
+{
+	struct dprtc_cmd_set_irq_enable *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
+	cmd_params->irq_index = irq_index;
+	cmd_params->en = en;
+
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_enable() - Get overall interrupt state
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @en:		Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 *en)
+{
+	struct dprtc_rsp_get_irq_enable *rsp_params;
+	struct dprtc_cmd_get_irq *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+	cmd_params->irq_index = irq_index;
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
+	*en = rsp_params->en;
+
+	return 0;
+}
+
+/**
+ * dprtc_set_irq_mask() - Set interrupt mask.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @mask:	Event mask to trigger interrupt;
+ *		each bit:
+ *			0 = ignore event
+ *			1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 mask)
+{
+	struct dprtc_cmd_set_irq_mask *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
+	cmd_params->mask = cpu_to_le32(mask);
+	cmd_params->irq_index = irq_index;
+
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_mask() - Get interrupt mask.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @mask:	Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 *mask)
+{
+	struct dprtc_rsp_get_irq_mask *rsp_params;
+	struct dprtc_cmd_get_irq *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+	cmd_params->irq_index = irq_index;
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
+	*mask = le32_to_cpu(rsp_params->mask);
+
+	return 0;
+}
+
+/**
+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
+ *
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @status:	Returned interrupts status - one bit per cause:
+ *			0 = no interrupt pending
+ *			1 = interrupt pending
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u32 *status)
+{
+	struct dprtc_cmd_get_irq_status *cmd_params;
+	struct dprtc_rsp_get_irq_status *rsp_params;
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
+	cmd_params->status = cpu_to_le32(*status);
+	cmd_params->irq_index = irq_index;
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
+	*status = le32_to_cpu(rsp_params->status);
+
+	return 0;
+}
+
+/**
+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
+ *
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @status:	Bits to clear (W1C) - one bit per cause:
+ *			0 = don't change
+ *			1 = clear status bit
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u8 irq_index,
+			   u32 status)
+{
+	struct dprtc_cmd_clear_irq_status *cmd_params;
+	struct fsl_mc_command cmd = { 0 };
+
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
+	cmd_params->irq_index = irq_index;
+	cmd_params->status = cpu_to_le32(status);
+
+	return mc_send_command(mc_io, &cmd);
+}
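
Editor's note: the status pair above follows the usual write-1-to-clear convention: read the pending causes, service them, then acknowledge exactly the bits that were handled. A hedged sketch of that pattern, using the DPRTC_IRQ_INDEX and DPRTC_EVENT_PPS constants declared in dprtc.h further below (the helper name and the zero cmd_flags are illustrative; token setup is assumed to happen elsewhere):

/* Minimal sketch, not part of the patch: service and acknowledge the
 * PPS cause on an already opened DPRTC object.
 */
static int dprtc_handle_pps(struct fsl_mc_io *mc_io, u16 token)
{
	u32 status = 0;
	int err;

	err = dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, &status);
	if (err)
		return err;

	if (!(status & DPRTC_EVENT_PPS))
		return 0;

	/* ... report the PPS event to the PTP core here ... */

	return dprtc_clear_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX,
				      DPRTC_EVENT_PPS);
}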
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
new file mode 100644
index 0000000..311c184
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Number of IRQs
+ */
+#define DPRTC_MAX_IRQ_NUM	1
+#define DPRTC_IRQ_INDEX		0
+
+#define DPRTC_EVENT_PPS		0x08000000
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       int dprtc_id,
+	       u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token);
+
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 en);
+
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 *en);
+
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 mask);
+
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 *mask);
+
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u32 *status);
+
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u8 irq_index,
+			   u32 status);
+
+#endif /* __FSL_DPRTC_H */
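
Editor's note: the header above completes the small DPRTC IRQ control API. A hedged sketch of how a caller might arm the PPS cause with it (the helper name, the dprtc_id value and the zero cmd_flags are illustrative assumptions; the real dpaa2-ptp driver keeps the token open for the device's lifetime rather than closing it immediately):

/* Minimal sketch, not part of the patch: arm the DPRTC PPS interrupt.
 * Assumes "dprtc.h" and the fsl-mc bus headers are already included and
 * that the caller owns the MC portal (mc_io).
 */
static int dprtc_arm_pps(struct fsl_mc_io *mc_io, int dprtc_id)
{
	u16 token;
	int err;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	/* let only the PPS cause assert the interrupt, then enable it */
	err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX,
				 DPRTC_EVENT_PPS);
	if (!err)
		err = dprtc_set_irq_enable(mc_io, 0, token, DPRTC_IRQ_INDEX, 1);

	dprtc_close(mc_io, 0, token);
	return err;
}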
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 0000000..c219587
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+	tristate "ENETC PF driver"
+	depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	select PHYLIB
+	help
+	  This driver supports NXP ENETC gigabit Ethernet controller PCIe
+	  physical function (PF) devices, managing ENETC ports at a privileged
+	  level.
+
+	  If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+	tristate "ENETC VF driver"
+	depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	select PHYLIB
+	help
+	  This driver supports NXP ENETC gigabit Ethernet controller PCIe
+	  virtual function (VF) devices enabled by the ENETC PF driver.
+
+	  If compiled as module (M), the module name is fsl-enetc-vf.
+
+config FSL_ENETC_MDIO
+	tristate "ENETC MDIO driver"
+	depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	help
+	  This driver supports NXP ENETC Central MDIO controller as a PCIe
+	  physical function (PF) device.
+
+	  If compiled as module (M), the module name is fsl-enetc-mdio.
+
+config FSL_ENETC_PTP_CLOCK
+	tristate "ENETC PTP clock driver"
+	depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
+	default y
+	help
+	  This driver adds support for using the ENETC 1588 timer
+	  as a PTP clock. This clock is only useful if your PTP
+	  programs are getting hardware time stamps on the PTP Ethernet
+	  packets using the SO_TIMESTAMPING API.
+
+	  If compiled as module (M), the module name is fsl-enetc-ptp.
+
+config FSL_ENETC_HW_TIMESTAMPING
+	bool "ENETC hardware timestamping support"
+	depends on FSL_ENETC || FSL_ENETC_VF
+	help
+	  Enable hardware timestamping of Ethernet packets via the
+	  SO_TIMESTAMPING API. Dynamic reallocation of the RX BD ring is not
+	  supported, and extended RX BDs are too expensive to use when
+	  timestamping is not needed, so this option enables extended RX BDs
+	  at build time in order to support hardware timestamping.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 0000000..d200c27
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
+fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
+
+obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 0000000..b6ff893
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/of_mdio.h>
+#include <linux/vmalloc.h>
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS	13
+#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
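
Editor's note: the worst-case Tx descriptor budget follows directly from these macros: the head buffer plus ENETC_MAX_SKB_FRAGS fragments take 14 data BDs, and ENETC_TXBDS_NEEDED() adds the optional extension BD plus the one-BD gap. An illustrative compile-time check of that arithmetic (not part of the patch, assumes the macros above are in scope):

/* head + 13 fragments = 14 data BDs; +1 extension BD, +1 gap BD = 16 */
_Static_assert(ENETC_TXBDS_MAX_NEEDED == 16,
	       "worst-case Tx BD budget expected to be 16");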
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+			      int active_offloads);
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_bdr *tx_ring;
+	int count;
+
+	tx_ring = priv->tx_ring[skb->queue_mapping];
+
+	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+		if (unlikely(skb_linearize(skb)))
+			goto drop_packet_err;
+
+	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+		netif_stop_subqueue(ndev, tx_ring->index);
+		return NETDEV_TX_BUSY;
+	}
+
+	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
+	if (unlikely(!count))
+		goto drop_packet_err;
+
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+		netif_stop_subqueue(ndev, tx_ring->index);
+
+	return NETDEV_TX_OK;
+
+drop_packet_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
+{
+	int l3_start, l3_hsize;
+	u16 l3_flags, l4_flags;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		l4_flags = ENETC_TXBD_L4_TCP;
+		break;
+	case offsetof(struct udphdr, check):
+		l4_flags = ENETC_TXBD_L4_UDP;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return false;
+	}
+
+	l3_start = skb_network_offset(skb);
+	l3_hsize = skb_network_header_len(skb);
+
+	l3_flags = 0;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		l3_flags = ENETC_TXBD_L3_IPV6;
+
+	/* write BD fields */
+	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
+	txbd->l4_csoff = l4_flags;
+
+	return true;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+				struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->is_dma_page)
+		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+			       tx_swbd->len, DMA_TO_DEVICE);
+	else
+		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
+				 tx_swbd->len, DMA_TO_DEVICE);
+	tx_swbd->dma = 0;
+}
+
+static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
+			      struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->dma)
+		enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+	if (tx_swbd->skb) {
+		dev_kfree_skb_any(tx_swbd->skb);
+		tx_swbd->skb = NULL;
+	}
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+			      int active_offloads)
+{
+	struct enetc_tx_swbd *tx_swbd;
+	skb_frag_t *frag;
+	int len = skb_headlen(skb);
+	union enetc_tx_bd temp_bd;
+	union enetc_tx_bd *txbd;
+	bool do_vlan, do_tstamp;
+	int i, count = 0;
+	unsigned int f;
+	dma_addr_t dma;
+	u8 flags = 0;
+
+	i = tx_ring->next_to_use;
+	txbd = ENETC_TXBD(*tx_ring, i);
+	prefetchw(txbd);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+		goto dma_err;
+
+	temp_bd.addr = cpu_to_le64(dma);
+	temp_bd.buf_len = cpu_to_le16(len);
+	temp_bd.lstatus = 0;
+
+	tx_swbd = &tx_ring->tx_swbd[i];
+	tx_swbd->dma = dma;
+	tx_swbd->len = len;
+	tx_swbd->is_dma_page = 0;
+	count++;
+
+	do_vlan = skb_vlan_tag_present(skb);
+	do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
+		    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
+	tx_swbd->do_tstamp = do_tstamp;
+	tx_swbd->check_wb = tx_swbd->do_tstamp;
+
+	if (do_vlan || do_tstamp)
+		flags |= ENETC_TXBD_FLAGS_EX;
+
+	if (enetc_tx_csum(skb, &temp_bd))
+		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+
+	/* first BD needs frm_len and offload flags set */
+	temp_bd.frm_len = cpu_to_le16(skb->len);
+	temp_bd.flags = flags;
+
+	if (flags & ENETC_TXBD_FLAGS_EX) {
+		u8 e_flags = 0;
+		*txbd = temp_bd;
+		enetc_clear_tx_bd(&temp_bd);
+
+		/* add extension BD for VLAN and/or timestamping */
+		flags = 0;
+		tx_swbd++;
+		txbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+			txbd = ENETC_TXBD(*tx_ring, 0);
+		}
+		prefetchw(txbd);
+
+		if (do_vlan) {
+			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+			temp_bd.ext.tpid = 0; /* < C-TAG */
+			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+		}
+
+		if (do_tstamp) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
+		}
+
+		temp_bd.ext.e_flags = e_flags;
+		count++;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+		len = skb_frag_size(frag);
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_err;
+
+		*txbd = temp_bd;
+		enetc_clear_tx_bd(&temp_bd);
+
+		flags = 0;
+		tx_swbd++;
+		txbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+			txbd = ENETC_TXBD(*tx_ring, 0);
+		}
+		prefetchw(txbd);
+
+		temp_bd.addr = cpu_to_le64(dma);
+		temp_bd.buf_len = cpu_to_le16(len);
+
+		tx_swbd->dma = dma;
+		tx_swbd->len = len;
+		tx_swbd->is_dma_page = 1;
+		count++;
+	}
+
+	/* last BD needs 'F' bit set */
+	flags |= ENETC_TXBD_FLAGS_F;
+	temp_bd.flags = flags;
+	*txbd = temp_bd;
+
+	tx_ring->tx_swbd[i].skb = skb;
+
+	enetc_bdr_idx_inc(tx_ring, &i);
+	tx_ring->next_to_use = i;
+
+	/* let H/W know BD ring has been updated */
+	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+
+	return count;
+
+dma_err:
+	dev_err(tx_ring->dev, "DMA map error");
+
+	do {
+		tx_swbd = &tx_ring->tx_swbd[i];
+		enetc_free_tx_skb(tx_ring, tx_swbd);
+		if (i == 0)
+			i = tx_ring->bd_count;
+		i--;
+	} while (count--);
+
+	return 0;
+}
+
+static irqreturn_t enetc_msix(int irq, void *data)
+{
+	struct enetc_int_vector	*v = data;
+	int i;
+
+	/* disable interrupts */
+	enetc_wr_reg(v->rbier, 0);
+
+	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+	napi_schedule_irqoff(&v->napi);
+
+	return IRQ_HANDLED;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+			       struct napi_struct *napi, int work_limit);
+
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+	struct enetc_int_vector
+		*v = container_of(napi, struct enetc_int_vector, napi);
+	bool complete = true;
+	int work_done;
+	int i;
+
+	for (i = 0; i < v->count_tx_rings; i++)
+		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+			complete = false;
+
+	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
+	if (work_done == budget)
+		complete = false;
+
+	if (!complete)
+		return budget;
+
+	napi_complete_done(napi, work_done);
+
+	/* enable interrupts */
+	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+
+	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
+			     ENETC_TBIER_TXTIE);
+
+	return work_done;
+}
+
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+{
+	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+
+	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
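
Editor's note: enetc_bd_ready_count() measures how far the hardware consumer index read from tcir has advanced past the software clean index, allowing for wraparound. A small worked example with illustrative values:

#include <assert.h>

static void bd_ready_count_example(void)
{
	int bd_count = 64, ci = 60, pi = 5;	/* hardware index has wrapped */

	assert((pi >= ci ? pi - ci : bd_count - ci + pi) == 9);
}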
+
+static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
+				u64 *tstamp)
+{
+	u32 lo, hi, tstamp_lo;
+
+	lo = enetc_rd(hw, ENETC_SICTR0);
+	hi = enetc_rd(hw, ENETC_SICTR1);
+	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
+	if (lo <= tstamp_lo)
+		hi -= 1;
+	*tstamp = (u64)hi << 32 | tstamp_lo;
+}
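
Editor's note: only the low 32 bits of the transmit timestamp are written back in the BD, so the high word is taken from the free-running SICTR0/SICTR1 counter and decremented by one if the counter's low word has already wrapped past the stored value. A userspace-style sketch of the same reconstruction (names are illustrative, not part of the patch):

#include <stdint.h>

static uint64_t merge_tstamp(uint32_t cur_lo, uint32_t cur_hi, uint32_t bd_lo)
{
	if (cur_lo <= bd_lo)	/* low word wrapped since the BD write-back */
		cur_hi -= 1;

	return (uint64_t)cur_hi << 32 | bd_lo;
}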
+
+static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
+{
+	struct skb_shared_hwtstamps shhwtstamps;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+		skb_tstamp_tx(skb, &shhwtstamps);
+	}
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+{
+	struct net_device *ndev = tx_ring->ndev;
+	int tx_frm_cnt = 0, tx_byte_cnt = 0;
+	struct enetc_tx_swbd *tx_swbd;
+	int i, bds_to_clean;
+	bool do_tstamp;
+	u64 tstamp = 0;
+
+	i = tx_ring->next_to_clean;
+	tx_swbd = &tx_ring->tx_swbd[i];
+	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+	do_tstamp = false;
+
+	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
+		bool is_eof = !!tx_swbd->skb;
+
+		if (unlikely(tx_swbd->check_wb)) {
+			struct enetc_ndev_priv *priv = netdev_priv(ndev);
+			union enetc_tx_bd *txbd;
+
+			txbd = ENETC_TXBD(*tx_ring, i);
+
+			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
+			    tx_swbd->do_tstamp) {
+				enetc_get_tx_tstamp(&priv->si->hw, txbd,
+						    &tstamp);
+				do_tstamp = true;
+			}
+		}
+
+		if (likely(tx_swbd->dma))
+			enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+		if (is_eof) {
+			if (unlikely(do_tstamp)) {
+				enetc_tstamp_tx(tx_swbd->skb, tstamp);
+				do_tstamp = false;
+			}
+			napi_consume_skb(tx_swbd->skb, napi_budget);
+			tx_swbd->skb = NULL;
+		}
+
+		tx_byte_cnt += tx_swbd->len;
+
+		bds_to_clean--;
+		tx_swbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+		}
+
+		/* BD iteration loop end */
+		if (is_eof) {
+			tx_frm_cnt++;
+			/* re-arm interrupt source */
+			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
+				     BIT(16 + tx_ring->index));
+		}
+
+		if (unlikely(!bds_to_clean))
+			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+	}
+
+	tx_ring->next_to_clean = i;
+	tx_ring->stats.packets += tx_frm_cnt;
+	tx_ring->stats.bytes += tx_byte_cnt;
+
+	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
+		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+		netif_wake_subqueue(ndev, tx_ring->index);
+	}
+
+	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
+}
+
+static bool enetc_new_page(struct enetc_bdr *rx_ring,
+			   struct enetc_rx_swbd *rx_swbd)
+{
+	struct page *page;
+	dma_addr_t addr;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
+
+	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
+		__free_page(page);
+
+		return false;
+	}
+
+	rx_swbd->dma = addr;
+	rx_swbd->page = page;
+	rx_swbd->page_offset = ENETC_RXB_PAD;
+
+	return true;
+}
+
+static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+	struct enetc_rx_swbd *rx_swbd;
+	union enetc_rx_bd *rxbd;
+	int i, j;
+
+	i = rx_ring->next_to_use;
+	rx_swbd = &rx_ring->rx_swbd[i];
+	rxbd = ENETC_RXBD(*rx_ring, i);
+
+	for (j = 0; j < buff_cnt; j++) {
+		/* try reuse page */
+		if (unlikely(!rx_swbd->page)) {
+			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
+				rx_ring->stats.rx_alloc_errs++;
+				break;
+			}
+		}
+
+		/* update RxBD */
+		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
+					   rx_swbd->page_offset);
+		/* clear 'R' as well */
+		rxbd->r.lstatus = 0;
+
+		rx_swbd++;
+		rxbd++;
+		i++;
+		if (unlikely(i == rx_ring->bd_count)) {
+			i = 0;
+			rx_swbd = rx_ring->rx_swbd;
+			rxbd = ENETC_RXBD(*rx_ring, 0);
+		}
+	}
+
+	if (likely(j)) {
+		rx_ring->next_to_alloc = i; /* keep track for page reuse */
+		rx_ring->next_to_use = i;
+		/* update ENETC's consumer index */
+		enetc_wr_reg(rx_ring->rcir, i);
+	}
+
+	return j;
+}
+
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+static void enetc_get_rx_tstamp(struct net_device *ndev,
+				union enetc_rx_bd *rxbd,
+				struct sk_buff *skb)
+{
+	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	u32 lo, hi, tstamp_lo;
+	u64 tstamp;
+
+	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
+		lo = enetc_rd(hw, ENETC_SICTR0);
+		hi = enetc_rd(hw, ENETC_SICTR1);
+		tstamp_lo = le32_to_cpu(rxbd->r.tstamp);
+		if (lo <= tstamp_lo)
+			hi -= 1;
+
+		tstamp = (u64)hi << 32 | tstamp_lo;
+		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
+	}
+}
+#endif
+
+static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
+{
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+#endif
+	/* TODO: hashing */
+	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+
+		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+
+	/* If a VLAN tag was extracted, copy it to the skb. For now we assume
+	 * it's a standard TPID, but the HW also supports custom values.
+	 */
+	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       le16_to_cpu(rxbd->r.vlan_opt));
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
+#endif
+}
+
+static void enetc_process_skb(struct enetc_bdr *rx_ring,
+			      struct sk_buff *skb)
+{
+	skb_record_rx_queue(skb, rx_ring->index);
+	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+}
+
+static bool enetc_page_reusable(struct page *page)
+{
+	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
+}
+
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+			     struct enetc_rx_swbd *old)
+{
+	struct enetc_rx_swbd *new;
+
+	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+	/* next buf that may reuse a page */
+	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+	/* copy page reference */
+	*new = *old;
+}
+
+static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
+					       int i, u16 size)
+{
+	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
+				      rx_swbd->page_offset,
+				      size, DMA_FROM_DEVICE);
+	return rx_swbd;
+}
+
+static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
+			      struct enetc_rx_swbd *rx_swbd)
+{
+	if (likely(enetc_page_reusable(rx_swbd->page))) {
+		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
+		page_ref_inc(rx_swbd->page);
+
+		enetc_reuse_page(rx_ring, rx_swbd);
+
+		/* sync for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+						 rx_swbd->page_offset,
+						 ENETC_RXB_DMA_SIZE,
+						 DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	rx_swbd->page = NULL;
+}
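
Editor's note: Rx buffer recycling relies on each page being split in two. ENETC_RXB_TRUESIZE is half a 4K page, so XOR-ing the offset with it flips a software BD between the lower and upper half while the networking stack may still hold the other half. A sketch of the toggle (the hard-coded constant is an assumption matching the definition in enetc.h):

static unsigned int flip_page_half(unsigned int page_offset)
{
	return page_offset ^ 2048;	/* ENETC_RXB_TRUESIZE */
}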
+
+static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
+						int i, u16 size)
+{
+	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+	struct sk_buff *skb;
+	void *ba;
+
+	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
+	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+	if (unlikely(!skb)) {
+		rx_ring->stats.rx_alloc_errs++;
+		return NULL;
+	}
+
+	skb_reserve(skb, ENETC_RXB_PAD);
+	__skb_put(skb, size);
+
+	enetc_put_rx_buff(rx_ring, rx_swbd);
+
+	return skb;
+}
+
+static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
+				     u16 size, struct sk_buff *skb)
+{
+	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
+			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
+
+	enetc_put_rx_buff(rx_ring, rx_swbd);
+}
+
+#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
+
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+			       struct napi_struct *napi, int work_limit)
+{
+	int rx_frm_cnt = 0, rx_byte_cnt = 0;
+	int cleaned_cnt, i;
+
+	cleaned_cnt = enetc_bd_unused(rx_ring);
+	/* next descriptor to process */
+	i = rx_ring->next_to_clean;
+
+	while (likely(rx_frm_cnt < work_limit)) {
+		union enetc_rx_bd *rxbd;
+		struct sk_buff *skb;
+		u32 bd_status;
+		u16 size;
+
+		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+
+			cleaned_cnt -= count;
+		}
+
+		rxbd = ENETC_RXBD(*rx_ring, i);
+		bd_status = le32_to_cpu(rxbd->r.lstatus);
+		if (!bd_status)
+			break;
+
+		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+		dma_rmb(); /* for reading other rxbd fields */
+		size = le16_to_cpu(rxbd->r.buf_len);
+		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+		if (!skb)
+			break;
+
+		enetc_get_offloads(rx_ring, rxbd, skb);
+
+		cleaned_cnt++;
+		rxbd++;
+		i++;
+		if (unlikely(i == rx_ring->bd_count)) {
+			i = 0;
+			rxbd = ENETC_RXBD(*rx_ring, 0);
+		}
+
+		if (unlikely(bd_status &
+			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+			dev_kfree_skb(skb);
+			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+				dma_rmb();
+				bd_status = le32_to_cpu(rxbd->r.lstatus);
+				rxbd++;
+				i++;
+				if (unlikely(i == rx_ring->bd_count)) {
+					i = 0;
+					rxbd = ENETC_RXBD(*rx_ring, 0);
+				}
+			}
+
+			rx_ring->ndev->stats.rx_dropped++;
+			rx_ring->ndev->stats.rx_errors++;
+
+			break;
+		}
+
+		/* not last BD in frame? */
+		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+			bd_status = le32_to_cpu(rxbd->r.lstatus);
+			size = ENETC_RXB_DMA_SIZE;
+
+			if (bd_status & ENETC_RXBD_LSTATUS_F) {
+				dma_rmb();
+				size = le16_to_cpu(rxbd->r.buf_len);
+			}
+
+			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+
+			cleaned_cnt++;
+			rxbd++;
+			i++;
+			if (unlikely(i == rx_ring->bd_count)) {
+				i = 0;
+				rxbd = ENETC_RXBD(*rx_ring, 0);
+			}
+		}
+
+		rx_byte_cnt += skb->len;
+
+		enetc_process_skb(rx_ring, skb);
+
+		napi_gro_receive(napi, skb);
+
+		rx_frm_cnt++;
+	}
+
+	rx_ring->next_to_clean = i;
+
+	rx_ring->stats.packets += rx_frm_cnt;
+	rx_ring->stats.bytes += rx_byte_cnt;
+
+	return rx_frm_cnt;
+}
+
+/* Probing and Init */
+#define ENETC_MAX_RFS_SIZE 64
+void enetc_get_si_caps(struct enetc_si *si)
+{
+	struct enetc_hw *hw = &si->hw;
+	u32 val;
+
+	/* find out how many of various resources we have to work with */
+	val = enetc_rd(hw, ENETC_SICAPR0);
+	si->num_rx_rings = (val >> 16) & 0xff;
+	si->num_tx_rings = val & 0xff;
+
+	val = enetc_rd(hw, ENETC_SIRFSCAPR);
+	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+
+	si->num_rss = 0;
+	val = enetc_rd(hw, ENETC_SIPCAPR0);
+	if (val & ENETC_SIPCAPR0_RSS) {
+		val = enetc_rd(hw, ENETC_SIRSSCAPR);
+		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
+	}
+}
+
+static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+{
+	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
+					&r->bd_dma_base, GFP_KERNEL);
+	if (!r->bd_base)
+		return -ENOMEM;
+
+	/* h/w requires 128B alignment */
+	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
+		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
+				  r->bd_dma_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+{
+	int err;
+
+	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
+	if (!txr->tx_swbd)
+		return -ENOMEM;
+
+	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+	if (err) {
+		vfree(txr->tx_swbd);
+		return err;
+	}
+
+	txr->next_to_clean = 0;
+	txr->next_to_use = 0;
+
+	return 0;
+}
+
+static void enetc_free_txbdr(struct enetc_bdr *txr)
+{
+	int size, i;
+
+	for (i = 0; i < txr->bd_count; i++)
+		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+
+	size = txr->bd_count * sizeof(union enetc_tx_bd);
+
+	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
+	txr->bd_base = NULL;
+
+	vfree(txr->tx_swbd);
+	txr->tx_swbd = NULL;
+}
+
+static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+{
+	int i, err;
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		err = enetc_alloc_txbdr(priv->tx_ring[i]);
+
+		if (err)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i-- > 0)
+		enetc_free_txbdr(priv->tx_ring[i]);
+
+	return err;
+}
+
+static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_free_txbdr(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+{
+	int err;
+
+	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
+	if (!rxr->rx_swbd)
+		return -ENOMEM;
+
+	err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+	if (err) {
+		vfree(rxr->rx_swbd);
+		return err;
+	}
+
+	rxr->next_to_clean = 0;
+	rxr->next_to_use = 0;
+	rxr->next_to_alloc = 0;
+
+	return 0;
+}
+
+static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+{
+	int size;
+
+	size = rxr->bd_count * sizeof(union enetc_rx_bd);
+
+	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
+	rxr->bd_base = NULL;
+
+	vfree(rxr->rx_swbd);
+	rxr->rx_swbd = NULL;
+}
+
+static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+{
+	int i, err;
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+
+		if (err)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i-- > 0)
+		enetc_free_rxbdr(priv->rx_ring[i]);
+
+	return err;
+}
+
+static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_free_rxbdr(priv->rx_ring[i]);
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+	int i;
+
+	if (!tx_ring->tx_swbd)
+		return;
+
+	for (i = 0; i < tx_ring->bd_count; i++) {
+		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+		enetc_free_tx_skb(tx_ring, tx_swbd);
+	}
+
+	tx_ring->next_to_clean = 0;
+	tx_ring->next_to_use = 0;
+}
+
+static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
+{
+	int i;
+
+	if (!rx_ring->rx_swbd)
+		return;
+
+	for (i = 0; i < rx_ring->bd_count; i++) {
+		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+		if (!rx_swbd->page)
+			continue;
+
+		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rx_swbd->page);
+		rx_swbd->page = NULL;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->next_to_alloc = 0;
+}
+
+static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_free_rx_ring(priv->rx_ring[i]);
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_free_tx_ring(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+					   GFP_KERNEL);
+	if (!cbdr->bd_base)
+		return -ENOMEM;
+
+	/* h/w requires 128B alignment */
+	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+		return -EINVAL;
+	}
+
+	cbdr->next_to_clean = 0;
+	cbdr->next_to_use = 0;
+
+	return 0;
+}
+
+static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+	cbdr->bd_base = NULL;
+}
+
+static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+{
+	/* set CBDR cache attributes */
+	enetc_wr(hw, ENETC_SICAR2,
+		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+	enetc_wr(hw, ENETC_SICBDRPIR, 0);
+	enetc_wr(hw, ENETC_SICBDRCIR, 0);
+
+	/* enable ring */
+	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
+
+	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+}
+
+static void enetc_clear_cbdr(struct enetc_hw *hw)
+{
+	enetc_wr(hw, ENETC_SICBDRMR, 0);
+}
+
+static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
+{
+	int *rss_table;
+	int i;
+
+	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
+	if (!rss_table)
+		return -ENOMEM;
+
+	/* Set up RSS table defaults */
+	for (i = 0; i < si->num_rss; i++)
+		rss_table[i] = i % num_groups;
+
+	enetc_set_rss_table(si, rss_table, si->num_rss);
+
+	kfree(rss_table);
+
+	return 0;
+}
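
Editor's note: the default RSS table simply round-robins hash buckets over the enabled Rx rings (i % num_groups). As an illustrative worked example, 8 buckets spread over 3 rings produce the pattern below; the element type matches what enetc_set_rss_table() takes.

static const u32 example_rss_table[8] = { 0, 1, 2, 0, 1, 2, 0, 1 };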
+
+static int enetc_configure_si(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	struct enetc_hw *hw = &si->hw;
+	int err;
+
+	enetc_setup_cbdr(hw, &si->cbd_ring);
+	/* set SI cache attributes */
+	enetc_wr(hw, ENETC_SICAR0,
+		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+	/* enable SI */
+	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+
+	if (si->num_rss) {
+		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	int cpus = num_online_cpus();
+
+	priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+	priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+
+	/* Enable all available TX rings in order to configure as many
+	 * priorities as possible, when needed.
+	 * TODO: Make # of TX rings run-time configurable
+	 */
+	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
+	priv->num_tx_rings = si->num_tx_rings;
+	priv->bdr_int_num = cpus;
+
+	/* SI specific */
+	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
+}
+
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	int err;
+
+	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
+	if (err)
+		return err;
+
+	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
+				  GFP_KERNEL);
+	if (!priv->cls_rules) {
+		err = -ENOMEM;
+		goto err_alloc_cls;
+	}
+
+	err = enetc_configure_si(priv);
+	if (err)
+		goto err_config_si;
+
+	return 0;
+
+err_config_si:
+	kfree(priv->cls_rules);
+err_alloc_cls:
+	enetc_clear_cbdr(&si->hw);
+	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+	return err;
+}
+
+void enetc_free_si_resources(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+
+	enetc_clear_cbdr(&si->hw);
+	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+	kfree(priv->cls_rules);
+}
+
+static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+	int idx = tx_ring->index;
+	u32 tbmr;
+
+	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+		       lower_32_bits(tx_ring->bd_dma_base));
+
+	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+		       upper_32_bits(tx_ring->bd_dma_base));
+
+	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
+	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
+	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
+	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
+
+	/* enable Tx ints by setting pkt thr to 1 */
+	enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
+
+	tbmr = ENETC_TBMR_EN;
+	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		tbmr |= ENETC_TBMR_VIH;
+
+	/* enable ring */
+	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+
+	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
+	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
+	tx_ring->idr = hw->reg + ENETC_SITXIDR;
+}
+
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+	int idx = rx_ring->index;
+	u32 rbmr;
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+		       lower_32_bits(rx_ring->bd_dma_base));
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+		       upper_32_bits(rx_ring->bd_dma_base));
+
+	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+		       ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+
+	/* enable Rx ints by setting pkt thr to 1 */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
+
+	rbmr = ENETC_RBMR_EN;
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+	rbmr |= ENETC_RBMR_BDS;
+#endif
+	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		rbmr |= ENETC_RBMR_VTE;
+
+	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
+	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+
+	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+
+	/* enable ring */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+}
+
+static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+	int idx = rx_ring->index;
+
+	/* disable EN bit on ring */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
+}
+
+static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+	int delay = 8, timeout = 100;
+	int idx = tx_ring->index;
+
+	/* disable EN bit on ring */
+	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+
+	/* wait for busy to clear */
+	while (delay < timeout &&
+	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
+		msleep(delay);
+		delay *= 2;
+	}
+
+	if (delay >= timeout)
+		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
+			    idx);
+}
+
+static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+
+	udelay(1);
+}
+
+static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+{
+	struct pci_dev *pdev = priv->si->pdev;
+	cpumask_t cpu_mask;
+	int i, j, err;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+		struct enetc_int_vector *v = priv->int_vector[i];
+		int entry = ENETC_BDR_INT_BASE_IDX + i;
+		struct enetc_hw *hw = &priv->si->hw;
+
+		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+			 priv->ndev->name, i);
+		err = request_irq(irq, enetc_msix, 0, v->name, v);
+		if (err) {
+			dev_err(priv->dev, "request_irq() failed!\n");
+			goto irq_err;
+		}
+
+		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+
+		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
+
+		for (j = 0; j < v->count_tx_rings; j++) {
+			int idx = v->tx_ring[j].index;
+
+			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
+		}
+		cpumask_clear(&cpu_mask);
+		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+		irq_set_affinity_hint(irq, &cpu_mask);
+	}
+
+	return 0;
+
+irq_err:
+	while (i--) {
+		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+		irq_set_affinity_hint(irq, NULL);
+		free_irq(irq, priv->int_vector[i]);
+	}
+
+	return err;
+}
+
+static void enetc_free_irqs(struct enetc_ndev_priv *priv)
+{
+	struct pci_dev *pdev = priv->si->pdev;
+	int i;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+		irq_set_affinity_hint(irq, NULL);
+		free_irq(irq, priv->int_vector[i]);
+	}
+}
+
+static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	/* enable Tx & Rx event indication */
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		enetc_rxbdr_wr(&priv->si->hw, i,
+			       ENETC_RBIER, ENETC_RBIER_RXTIE);
+	}
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		enetc_txbdr_wr(&priv->si->hw, i,
+			       ENETC_TBIER, ENETC_TBIER_TXTIE);
+	}
+}
+
+static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+}
+
+static void adjust_link(struct net_device *ndev)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	phy_print_status(phydev);
+}
+
+static int enetc_phy_connect(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct phy_device *phydev;
+
+	if (!priv->phy_node)
+		return 0; /* phy-less mode */
+
+	phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
+				0, priv->if_mode);
+	if (!phydev) {
+		dev_err(&ndev->dev, "could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	phy_attached_info(phydev);
+
+	return 0;
+}
+
+int enetc_open(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int i, err;
+
+	err = enetc_setup_irqs(priv);
+	if (err)
+		return err;
+
+	err = enetc_phy_connect(ndev);
+	if (err)
+		goto err_phy_connect;
+
+	err = enetc_alloc_tx_resources(priv);
+	if (err)
+		goto err_alloc_tx;
+
+	err = enetc_alloc_rx_resources(priv);
+	if (err)
+		goto err_alloc_rx;
+
+	enetc_setup_bdrs(priv);
+
+	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
+	if (err)
+		goto err_set_queues;
+
+	for (i = 0; i < priv->bdr_int_num; i++)
+		napi_enable(&priv->int_vector[i]->napi);
+
+	enetc_enable_interrupts(priv);
+
+	if (ndev->phydev)
+		phy_start(ndev->phydev);
+	else
+		netif_carrier_on(ndev);
+
+	netif_tx_start_all_queues(ndev);
+
+	return 0;
+
+err_set_queues:
+	enetc_free_rx_resources(priv);
+err_alloc_rx:
+	enetc_free_tx_resources(priv);
+err_alloc_tx:
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
+err_phy_connect:
+	enetc_free_irqs(priv);
+
+	return err;
+}
+
+int enetc_close(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int i;
+
+	netif_tx_stop_all_queues(ndev);
+
+	if (ndev->phydev) {
+		phy_stop(ndev->phydev);
+		phy_disconnect(ndev->phydev);
+	} else {
+		netif_carrier_off(ndev);
+	}
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		napi_synchronize(&priv->int_vector[i]->napi);
+		napi_disable(&priv->int_vector[i]->napi);
+	}
+
+	enetc_disable_interrupts(priv);
+	enetc_clear_bdrs(priv);
+
+	enetc_free_rxtx_rings(priv);
+	enetc_free_rx_resources(priv);
+	enetc_free_tx_resources(priv);
+	enetc_free_irqs(priv);
+
+	return 0;
+}
+
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+		   void *type_data)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct tc_mqprio_qopt *mqprio = type_data;
+	struct enetc_bdr *tx_ring;
+	u8 num_tc;
+	int i;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EOPNOTSUPP;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	num_tc = mqprio->num_tc;
+
+	if (!num_tc) {
+		netdev_reset_tc(ndev);
+		netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+
+		/* Reset all ring priorities to 0 */
+		for (i = 0; i < priv->num_tx_rings; i++) {
+			tx_ring = priv->tx_ring[i];
+			enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+		}
+
+		return 0;
+	}
+
+	/* Check if we have enough BD rings available to accommodate all TCs */
+	if (num_tc > priv->num_tx_rings) {
+		netdev_err(ndev, "Max %d traffic classes supported\n",
+			   priv->num_tx_rings);
+		return -EINVAL;
+	}
+
+	/* For the moment, we use only one BD ring per TC.
+	 *
+	 * Configure num_tc BD rings with increasing priorities.
+	 */
+	for (i = 0; i < num_tc; i++) {
+		tx_ring = priv->tx_ring[i];
+		enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+	}
+
+	/* Reset the number of netdev queues based on the TC count */
+	netif_set_real_num_tx_queues(ndev, num_tc);
+
+	netdev_set_num_tc(ndev, num_tc);
+
+	/* Each TC is associated with one netdev queue */
+	for (i = 0; i < num_tc; i++)
+		netdev_set_tc_queue(ndev, i, 1, i);
+
+	return 0;
+}
+
+struct net_device_stats *enetc_get_stats(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	unsigned long packets = 0, bytes = 0;
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		packets += priv->rx_ring[i]->stats.packets;
+		bytes	+= priv->rx_ring[i]->stats.bytes;
+	}
+
+	stats->rx_packets = packets;
+	stats->rx_bytes = bytes;
+	bytes = 0;
+	packets = 0;
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		packets += priv->tx_ring[i]->stats.packets;
+		bytes	+= priv->tx_ring[i]->stats.bytes;
+	}
+
+	stats->tx_packets = packets;
+	stats->tx_bytes = bytes;
+
+	return stats;
+}
+
+static int enetc_set_rss(struct net_device *ndev, int en)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	u32 reg;
+
+	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+	reg = enetc_rd(hw, ENETC_SIMR);
+	reg &= ~ENETC_SIMR_RSSE;
+	reg |= (en) ? ENETC_SIMR_RSSE : 0;
+	enetc_wr(hw, ENETC_SIMR, reg);
+
+	return 0;
+}
+
+int enetc_set_features(struct net_device *ndev,
+		       netdev_features_t features)
+{
+	netdev_features_t changed = ndev->features ^ features;
+
+	if (changed & NETIF_F_RXHASH)
+		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+
+	return 0;
+}
+
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct hwtstamp_config config;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
+		break;
+	case HWTSTAMP_TX_ON:
+		priv->active_offloads |= ENETC_F_TX_TSTAMP;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+		break;
+	default:
+		priv->active_offloads |= ENETC_F_RX_TSTAMP;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+	}
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+	       -EFAULT : 0;
+}
+
+static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct hwtstamp_config config;
+
+	config.flags = 0;
+
+	if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+		config.tx_type = HWTSTAMP_TX_ON;
+	else
+		config.tx_type = HWTSTAMP_TX_OFF;
+
+	config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+	       -EFAULT : 0;
+}
+#endif
+
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+	if (cmd == SIOCSHWTSTAMP)
+		return enetc_hwtstamp_set(ndev, rq);
+	if (cmd == SIOCGHWTSTAMP)
+		return enetc_hwtstamp_get(ndev, rq);
+#endif
+	return -EINVAL;
+}
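
Editor's note: when CONFIG_FSL_ENETC_HW_TIMESTAMPING is set, the handlers above implement the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP interface, so timestamping is requested from userspace through the generic net_tstamp API. A minimal userspace sketch (the caller-provided socket and interface name are placeholders, not part of the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Minimal sketch: enable Tx/Rx hardware timestamping on one interface. */
static int enable_hw_tstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}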
+
+int enetc_alloc_msix(struct enetc_ndev_priv *priv)
+{
+	struct pci_dev *pdev = priv->si->pdev;
+	int size, v_tx_rings;
+	int i, n, err, nvec;
+
+	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
+	/* allocate MSIX for both messaging and Rx/Tx interrupts */
+	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+
+	if (n < 0)
+		return n;
+
+	if (n != nvec)
+		return -EPERM;
+
+	/* # of tx rings per int vector */
+	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+	size = sizeof(struct enetc_int_vector) +
+	       sizeof(struct enetc_bdr) * v_tx_rings;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		struct enetc_int_vector *v;
+		struct enetc_bdr *bdr;
+		int j;
+
+		v = kzalloc(size, GFP_KERNEL);
+		if (!v) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		priv->int_vector[i] = v;
+
+		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
+			       NAPI_POLL_WEIGHT);
+		v->count_tx_rings = v_tx_rings;
+
+		for (j = 0; j < v_tx_rings; j++) {
+			int idx;
+
+			/* default tx ring mapping policy */
+			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
+				idx = 2 * j + i; /* 2 CPUs */
+			else
+				idx = j + i * v_tx_rings; /* default */
+
+			__set_bit(idx, &v->tx_rings_map);
+			bdr = &v->tx_ring[j];
+			bdr->index = idx;
+			bdr->ndev = priv->ndev;
+			bdr->dev = priv->dev;
+			bdr->bd_count = priv->tx_bd_count;
+			priv->tx_ring[idx] = bdr;
+		}
+
+		bdr = &v->rx_ring;
+		bdr->index = i;
+		bdr->ndev = priv->ndev;
+		bdr->dev = priv->dev;
+		bdr->bd_count = priv->rx_bd_count;
+		priv->rx_ring[i] = bdr;
+	}
+
+	return 0;
+
+fail:
+	while (i--) {
+		netif_napi_del(&priv->int_vector[i]->napi);
+		kfree(priv->int_vector[i]);
+	}
+
+	pci_free_irq_vectors(pdev);
+
+	return err;
+}
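
Editor's note: the default Tx ring mapping interleaves rings across the interrupt vectors. With bdr_int_num equal to ENETC_MAX_BDR_INT (2) and eight Tx rings, v_tx_rings is 4 and idx = 2 * j + i, so vector 0 services rings 0, 2, 4 and 6 while vector 1 services rings 1, 3, 5 and 7. A trivial illustration of the index formula:

static int default_tx_ring_idx(int vector, int slot)
{
	return 2 * slot + vector;	/* two-vector case */
}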
+
+void enetc_free_msix(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		struct enetc_int_vector *v = priv->int_vector[i];
+
+		netif_napi_del(&v->napi);
+	}
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		priv->rx_ring[i] = NULL;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		priv->tx_ring[i] = NULL;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		kfree(priv->int_vector[i]);
+		priv->int_vector[i] = NULL;
+	}
+
+	/* disable all MSIX for this device */
+	pci_free_irq_vectors(priv->si->pdev);
+}
+
+static void enetc_kfree_si(struct enetc_si *si)
+{
+	char *p = (char *)si - si->pad;
+
+	kfree(p);
+}
+
+static void enetc_detect_errata(struct enetc_si *si)
+{
+	if (si->pdev->revision == ENETC_REV1)
+		si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
+			     ENETC_ERR_UCMCSWP;
+}
+
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
+{
+	struct enetc_si *si, *p;
+	struct enetc_hw *hw;
+	size_t alloc_size;
+	int err, len;
+
+	pcie_flr(pdev);
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "device enable failed\n");
+		return err;
+	}
+
+	/* set up for high or low dma */
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"DMA configuration failed: 0x%x\n", err);
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_mem_regions(pdev, name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+		goto err_pci_mem_reg;
+	}
+
+	pci_set_master(pdev);
+
+	alloc_size = sizeof(struct enetc_si);
+	if (sizeof_priv) {
+		/* align priv to 32B */
+		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
+		alloc_size += sizeof_priv;
+	}
+	/* force 32B alignment for enetc_si */
+	alloc_size += ENETC_SI_ALIGN - 1;
+
+	p = kzalloc(alloc_size, GFP_KERNEL);
+	if (!p) {
+		err = -ENOMEM;
+		goto err_alloc_si;
+	}
+
+	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
+	si->pad = (char *)si - (char *)p;
+
+	pci_set_drvdata(pdev, si);
+	si->pdev = pdev;
+	hw = &si->hw;
+
+	len = pci_resource_len(pdev, ENETC_BAR_REGS);
+	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+	if (!hw->reg) {
+		err = -ENXIO;
+		dev_err(&pdev->dev, "ioremap() failed\n");
+		goto err_ioremap;
+	}
+	if (len > ENETC_PORT_BASE)
+		hw->port = hw->reg + ENETC_PORT_BASE;
+	if (len > ENETC_GLOBAL_BASE)
+		hw->global = hw->reg + ENETC_GLOBAL_BASE;
+
+	enetc_detect_errata(si);
+
+	return 0;
+
+err_ioremap:
+	enetc_kfree_si(si);
+err_alloc_si:
+	pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+void enetc_pci_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_hw *hw = &si->hw;
+
+	iounmap(hw->reg);
+	enetc_kfree_si(si);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
new file mode 100644
index 0000000..541b4e2
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+#include "enetc_hw.h"
+
+#define ENETC_MAC_MAXFRM_SIZE	9600
+#define ENETC_MAX_MTU		(ENETC_MAC_MAXFRM_SIZE - \
+				(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+
+struct enetc_tx_swbd {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	u16 len;
+	u8 is_dma_page:1;
+	u8 check_wb:1;
+	u8 do_tstamp:1;
+};
+
+#define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
+#define ENETC_RXB_TRUESIZE	2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
+#define ENETC_RXB_DMA_SIZE	\
+	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+
+struct enetc_rx_swbd {
+	dma_addr_t dma;
+	struct page *page;
+	u16 page_offset;
+};
+
+struct enetc_ring_stats {
+	unsigned int packets;
+	unsigned int bytes;
+	unsigned int rx_alloc_errs;
+};
+
+#define ENETC_BDR_DEFAULT_SIZE	1024
+#define ENETC_DEFAULT_TX_WORK	256
+
+struct enetc_bdr {
+	struct device *dev; /* for DMA mapping */
+	struct net_device *ndev;
+	void *bd_base; /* points to Rx or Tx BD ring */
+	union {
+		void __iomem *tpir;
+		void __iomem *rcir;
+	};
+	u16 index;
+	int bd_count; /* # of BDs */
+	int next_to_use;
+	int next_to_clean;
+	union {
+		struct enetc_tx_swbd *tx_swbd;
+		struct enetc_rx_swbd *rx_swbd;
+	};
+	union {
+		void __iomem *tcir; /* Tx */
+		int next_to_alloc; /* Rx */
+	};
+	void __iomem *idr; /* Interrupt Detect Register pointer */
+
+	struct enetc_ring_stats stats;
+
+	dma_addr_t bd_dma_base;
+} ____cacheline_aligned_in_smp;
+
+static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
+{
+	if (unlikely(++*i == bdr->bd_count))
+		*i = 0;
+}
+
+static inline int enetc_bd_unused(struct enetc_bdr *bdr)
+{
+	if (bdr->next_to_clean > bdr->next_to_use)
+		return bdr->next_to_clean - bdr->next_to_use - 1;
+
+	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
+}
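
Editor's note: enetc_bd_unused() deliberately sacrifices one slot so that next_to_clean == next_to_use can only mean an empty ring. As a worked example with illustrative values, a freshly initialized 1024-entry ring reports 1023 free BDs, and a completely full ring reports 0:

#include <assert.h>

static void bd_unused_example(void)
{
	int bd_count = 1024, next_to_clean = 10, next_to_use = 10;

	/* empty ring; the reserved slot keeps "full" distinct from "empty" */
	assert(bd_count + next_to_clean - next_to_use - 1 == 1023);
}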
+
+/* Control BD ring */
+#define ENETC_CBDR_DEFAULT_SIZE	64
+struct enetc_cbdr {
+	void *bd_base; /* points to Rx or Tx BD ring */
+	void __iomem *pir;
+	void __iomem *cir;
+
+	int bd_count; /* # of BDs */
+	int next_to_use;
+	int next_to_clean;
+
+	dma_addr_t bd_dma_base;
+};
+
+#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
+#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+struct enetc_msg_swbd {
+	void *vaddr;
+	dma_addr_t dma;
+	int size;
+};
+
+#define ENETC_REV1	0x1
+enum enetc_errata {
+	ENETC_ERR_TXCSUM	= BIT(0),
+	ENETC_ERR_VLAN_ISOL	= BIT(1),
+	ENETC_ERR_UCMCSWP	= BIT(2),
+};
+
+/* PCI IEP device data */
+struct enetc_si {
+	struct pci_dev *pdev;
+	struct enetc_hw hw;
+	enum enetc_errata errata;
+
+	struct net_device *ndev; /* back ref. */
+
+	struct enetc_cbdr cbd_ring;
+
+	int num_rx_rings; /* how many rings are available in the SI */
+	int num_tx_rings;
+	int num_fs_entries;
+	int num_rss; /* number of RSS buckets */
+	unsigned short pad;
+};
+
+#define ENETC_SI_ALIGN	32
+
+static inline void *enetc_si_priv(const struct enetc_si *si)
+{
+	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
+}
+
+static inline bool enetc_si_is_pf(struct enetc_si *si)
+{
+	return !!(si->hw.port);
+}
+
+#define ENETC_MAX_NUM_TXQS	8
+#define ENETC_INT_NAME_MAX	(IFNAMSIZ + 8)
+
+struct enetc_int_vector {
+	void __iomem *rbier;
+	void __iomem *tbier_base;
+	unsigned long tx_rings_map;
+	int count_tx_rings;
+	struct napi_struct napi;
+	char name[ENETC_INT_NAME_MAX];
+
+	struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
+	struct enetc_bdr tx_ring[0];
+};
+
+struct enetc_cls_rule {
+	struct ethtool_rx_flow_spec fs;
+	int used;
+};
+
+#define ENETC_MAX_BDR_INT	2 /* fixed to max # of available cpus */
+
+/* TODO: more hardware offloads */
+enum enetc_active_offloads {
+	ENETC_F_RX_TSTAMP	= BIT(0),
+	ENETC_F_TX_TSTAMP	= BIT(1),
+};
+
+struct enetc_ndev_priv {
+	struct net_device *ndev;
+	struct device *dev; /* dma-mapping device */
+	struct enetc_si *si;
+
+	int bdr_int_num; /* number of Rx/Tx ring interrupts */
+	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
+	u16 num_rx_rings, num_tx_rings;
+	u16 rx_bd_count, tx_bd_count;
+
+	u16 msg_enable;
+	int active_offloads;
+
+	struct enetc_bdr *tx_ring[16];
+	struct enetc_bdr *rx_ring[16];
+
+	struct enetc_cls_rule *cls_rules;
+
+	struct device_node *phy_node;
+	phy_interface_t if_mode;
+};
+
+/* Messaging */
+
+/* VF-PF set primary MAC address message format */
+struct enetc_msg_cmd_set_primary_mac {
+	struct enetc_msg_cmd_header header;
+	struct sockaddr mac;
+};
+
+#define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))
+
+#define ENETC_CBDR_TIMEOUT	1000 /* usecs */
+
+/* PTP driver exports */
+extern int enetc_phc_index;
+
+/* SI common */
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
+void enetc_pci_remove(struct pci_dev *pdev);
+int enetc_alloc_msix(struct enetc_ndev_priv *priv);
+void enetc_free_msix(struct enetc_ndev_priv *priv);
+void enetc_get_si_caps(struct enetc_si *si);
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
+void enetc_free_si_resources(struct enetc_ndev_priv *priv);
+
+int enetc_open(struct net_device *ndev);
+int enetc_close(struct net_device *ndev);
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+int enetc_set_features(struct net_device *ndev,
+		       netdev_features_t features);
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+		   void *type_data);
+
+/* ethtool */
+void enetc_set_ethtool_ops(struct net_device *ndev);
+
+/* control buffer descriptor ring (CBDR) */
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+			    char *mac_addr, int si_map);
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+		       int index);
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
new file mode 100644
index 0000000..de466b7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+static void enetc_clean_cbdr(struct enetc_si *si)
+{
+	struct enetc_cbdr *ring = &si->cbd_ring;
+	struct enetc_cbd *dest_cbd;
+	int i, status;
+
+	i = ring->next_to_clean;
+
+	while (enetc_rd_reg(ring->cir) != i) {
+		dest_cbd = ENETC_CBD(*ring, i);
+		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
+		if (status)
+			dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
+				 status, dest_cbd->cmd);
+
+		memset(dest_cbd, 0, sizeof(*dest_cbd));
+
+		i = (i + 1) % ring->bd_count;
+	}
+
+	ring->next_to_clean = i;
+}
+
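+/* number of unused BDs in the CBD ring; one slot is always kept free so a
+ * full ring can be told apart from an empty one
+ */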
+static int enetc_cbd_unused(struct enetc_cbdr *r)
+{
+	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
+		r->bd_count;
+}
+
+static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+{
+	struct enetc_cbdr *ring = &si->cbd_ring;
+	int timeout = ENETC_CBDR_TIMEOUT;
+	struct enetc_cbd *dest_cbd;
+	int i;
+
+	if (unlikely(!ring->bd_base))
+		return -EIO;
+
+	if (unlikely(!enetc_cbd_unused(ring)))
+		enetc_clean_cbdr(si);
+
+	i = ring->next_to_use;
+	dest_cbd = ENETC_CBD(*ring, i);
+
+	/* copy command to the ring */
+	*dest_cbd = *cbd;
+	i = (i + 1) % ring->bd_count;
+
+	ring->next_to_use = i;
+	/* let H/W know BD ring has been updated */
+	enetc_wr_reg(ring->pir, i);
+
+	do {
+		if (enetc_rd_reg(ring->cir) == i)
+			break;
+		udelay(10); /* cannot sleep, rtnl_lock() */
+		timeout -= 10;
+	} while (timeout);
+
+	if (!timeout)
+		return -EBUSY;
+
+	enetc_clean_cbdr(si);
+
+	return 0;
+}
+
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
+{
+	struct enetc_cbd cbd;
+
+	memset(&cbd, 0, sizeof(cbd));
+
+	cbd.cls = 1;
+	cbd.status_flags = ENETC_CBD_FLAGS_SF;
+	cbd.index = cpu_to_le16(index);
+
+	return enetc_send_cmd(si, &cbd);
+}
+
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+			    char *mac_addr, int si_map)
+{
+	struct enetc_cbd cbd;
+	u32 upper;
+	u16 lower;
+
+	memset(&cbd, 0, sizeof(cbd));
+
+	/* fill up the "set" descriptor */
+	cbd.cls = 1;
+	cbd.status_flags = ENETC_CBD_FLAGS_SF;
+	cbd.index = cpu_to_le16(index);
+	cbd.opt[3] = cpu_to_le32(si_map);
+	/* enable entry */
+	cbd.opt[0] = cpu_to_le32(BIT(31));
+
+	upper = *(const u32 *)mac_addr;
+	lower = *(const u16 *)(mac_addr + 4);
+	cbd.addr[0] = cpu_to_le32(upper);
+	cbd.addr[1] = cpu_to_le32(lower);
+
+	return enetc_send_cmd(si, &cbd);
+}
+
+#define RFSE_ALIGN	64
+/* Set entry in RFS table */
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+		       int index)
+{
+	struct enetc_cbd cbd = {.cmd = 0};
+	dma_addr_t dma, dma_align;
+	void *tmp, *tmp_align;
+	int err;
+
+	/* fill up the "set" descriptor */
+	cbd.cmd = 0;
+	cbd.cls = 4;
+	cbd.index = cpu_to_le16(index);
+	cbd.length = cpu_to_le16(sizeof(*rfse));
+	cbd.opt[3] = cpu_to_le32(0); /* SI */
+
+	tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+				 &dma, GFP_KERNEL);
+	if (!tmp) {
+		dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
+		return -ENOMEM;
+	}
+
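+	/* the h/w expects a 64B-aligned buffer; align the DMA address and
+	 * the CPU pointer by hand inside the over-allocated area
+	 */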
+	dma_align = ALIGN(dma, RFSE_ALIGN);
+	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
+	memcpy(tmp_align, rfse, sizeof(*rfse));
+
+	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+
+	err = enetc_send_cmd(si, &cbd);
+	if (err)
+		dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
+
+	dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+			  tmp, dma);
+
+	return err;
+}
+
+#define RSSE_ALIGN	64
+static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
+			       bool read)
+{
+	struct enetc_cbd cbd = {.cmd = 0};
+	dma_addr_t dma, dma_align;
+	u8 *tmp, *tmp_align;
+	int err, i;
+
+	if (count < RSSE_ALIGN)
+		/* HW only takes in a full 64 entry table */
+		return -EINVAL;
+
+	tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
+				 &dma, GFP_KERNEL);
+	if (!tmp) {
+		dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
+		return -ENOMEM;
+	}
+	dma_align = ALIGN(dma, RSSE_ALIGN);
+	tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
+
+	if (!read)
+		for (i = 0; i < count; i++)
+			tmp_align[i] = (u8)(table[i]);
+
+	/* fill up the descriptor */
+	cbd.cmd = read ? 2 : 1;
+	cbd.cls = 3;
+	cbd.length = cpu_to_le16(count);
+
+	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+
+	err = enetc_send_cmd(si, &cbd);
+	if (err)
+		dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
+
+	if (read)
+		for (i = 0; i < count; i++)
+			table[i] = tmp_align[i];
+
+	dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
+
+	return err;
+}
+
+/* Get RSS table */
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+	return enetc_cmd_rss_table(si, table, count, true);
+}
+
+/* Set RSS table */
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
new file mode 100644
index 0000000..fcb52ef
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/net_tstamp.h>
+#include <linux/module.h>
+#include "enetc.h"
+
+static const u32 enetc_si_regs[] = {
+	ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
+	ENETC_SICBDRSR,	ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
+	ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
+	ENETC_SIUEFDCR
+};
+
+static const u32 enetc_txbdr_regs[] = {
+	ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
+	ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER
+};
+
+static const u32 enetc_rxbdr_regs[] = {
+	ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
+	ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER
+};
+
+static const u32 enetc_port_regs[] = {
+	ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
+	ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
+	ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0),
+	ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
+};
+
+static int enetc_get_reglen(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	int len;
+
+	len = ARRAY_SIZE(enetc_si_regs);
+	len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
+	len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;
+
+	if (hw->port)
+		len += ARRAY_SIZE(enetc_port_regs);
+
+	len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
+
+	return len;
+}
+
+static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+			   void *regbuf)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	u32 *buf = (u32 *)regbuf;
+	int i, j;
+	u32 addr;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
+		*buf++ = enetc_si_regs[i];
+		*buf++ = enetc_rd(hw, enetc_si_regs[i]);
+	}
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
+			addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);
+
+			*buf++ = addr;
+			*buf++ = enetc_rd(hw, addr);
+		}
+	}
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
+			addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);
+
+			*buf++ = addr;
+			*buf++ = enetc_rd(hw, addr);
+		}
+	}
+
+	if (!hw->port)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
+		addr = ENETC_PORT_BASE + enetc_port_regs[i];
+		*buf++ = addr;
+		*buf++ = enetc_rd(hw, addr);
+	}
+}
+
+static const struct {
+	int reg;
+	char name[ETH_GSTRING_LEN];
+} enetc_si_counters[] =  {
+	{ ENETC_SIROCT, "SI rx octets" },
+	{ ENETC_SIRFRM, "SI rx frames" },
+	{ ENETC_SIRUCA, "SI rx u-cast frames" },
+	{ ENETC_SIRMCA, "SI rx m-cast frames" },
+	{ ENETC_SITOCT, "SI tx octets" },
+	{ ENETC_SITFRM, "SI tx frames" },
+	{ ENETC_SITUCA, "SI tx u-cast frames" },
+	{ ENETC_SITMCA, "SI tx m-cast frames" },
+	{ ENETC_RBDCR(0), "Rx ring  0 discarded frames" },
+	{ ENETC_RBDCR(1), "Rx ring  1 discarded frames" },
+	{ ENETC_RBDCR(2), "Rx ring  2 discarded frames" },
+	{ ENETC_RBDCR(3), "Rx ring  3 discarded frames" },
+	{ ENETC_RBDCR(4), "Rx ring  4 discarded frames" },
+	{ ENETC_RBDCR(5), "Rx ring  5 discarded frames" },
+	{ ENETC_RBDCR(6), "Rx ring  6 discarded frames" },
+	{ ENETC_RBDCR(7), "Rx ring  7 discarded frames" },
+	{ ENETC_RBDCR(8), "Rx ring  8 discarded frames" },
+	{ ENETC_RBDCR(9), "Rx ring  9 discarded frames" },
+	{ ENETC_RBDCR(10), "Rx ring 10 discarded frames" },
+	{ ENETC_RBDCR(11), "Rx ring 11 discarded frames" },
+	{ ENETC_RBDCR(12), "Rx ring 12 discarded frames" },
+	{ ENETC_RBDCR(13), "Rx ring 13 discarded frames" },
+	{ ENETC_RBDCR(14), "Rx ring 14 discarded frames" },
+	{ ENETC_RBDCR(15), "Rx ring 15 discarded frames" },
+};
+
+static const struct {
+	int reg;
+	char name[ETH_GSTRING_LEN];
+} enetc_port_counters[] = {
+	{ ENETC_PM0_REOCT,  "MAC rx ethernet octets" },
+	{ ENETC_PM0_RALN,   "MAC rx alignment errors" },
+	{ ENETC_PM0_RXPF,   "MAC rx valid pause frames" },
+	{ ENETC_PM0_RFRM,   "MAC rx valid frames" },
+	{ ENETC_PM0_RFCS,   "MAC rx fcs errors" },
+	{ ENETC_PM0_RVLAN,  "MAC rx VLAN frames" },
+	{ ENETC_PM0_RERR,   "MAC rx frame errors" },
+	{ ENETC_PM0_RUCA,   "MAC rx unicast frames" },
+	{ ENETC_PM0_RMCA,   "MAC rx multicast frames" },
+	{ ENETC_PM0_RBCA,   "MAC rx broadcast frames" },
+	{ ENETC_PM0_RDRP,   "MAC rx dropped packets" },
+	{ ENETC_PM0_RPKT,   "MAC rx packets" },
+	{ ENETC_PM0_RUND,   "MAC rx undersized packets" },
+	{ ENETC_PM0_R64,    "MAC rx 64 byte packets" },
+	{ ENETC_PM0_R127,   "MAC rx 65-127 byte packets" },
+	{ ENETC_PM0_R255,   "MAC rx 128-255 byte packets" },
+	{ ENETC_PM0_R511,   "MAC rx 256-511 byte packets" },
+	{ ENETC_PM0_R1023,  "MAC rx 512-1023 byte packets" },
+	{ ENETC_PM0_R1518,  "MAC rx 1024-1518 byte packets" },
+	{ ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+	{ ENETC_PM0_ROVR,   "MAC rx oversized packets" },
+	{ ENETC_PM0_RJBR,   "MAC rx jabber packets" },
+	{ ENETC_PM0_RFRG,   "MAC rx fragment packets" },
+	{ ENETC_PM0_RCNP,   "MAC rx control packets" },
+	{ ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
+	{ ENETC_PM0_TEOCT,  "MAC tx ethernet octets" },
+	{ ENETC_PM0_TOCT,   "MAC tx octets" },
+	{ ENETC_PM0_TCRSE,  "MAC tx carrier sense errors" },
+	{ ENETC_PM0_TXPF,   "MAC tx valid pause frames" },
+	{ ENETC_PM0_TFRM,   "MAC tx frames" },
+	{ ENETC_PM0_TFCS,   "MAC tx fcs errors" },
+	{ ENETC_PM0_TVLAN,  "MAC tx VLAN frames" },
+	{ ENETC_PM0_TERR,   "MAC tx frame errors" },
+	{ ENETC_PM0_TUCA,   "MAC tx unicast frames" },
+	{ ENETC_PM0_TMCA,   "MAC tx multicast frames" },
+	{ ENETC_PM0_TBCA,   "MAC tx broadcast frames" },
+	{ ENETC_PM0_TPKT,   "MAC tx packets" },
+	{ ENETC_PM0_TUND,   "MAC tx undersized packets" },
+	{ ENETC_PM0_T127,   "MAC tx 65-127 byte packets" },
+	{ ENETC_PM0_T1023,  "MAC tx 512-1023 byte packets" },
+	{ ENETC_PM0_T1518,  "MAC tx 1024-1518 byte packets" },
+	{ ENETC_PM0_TCNP,   "MAC tx control packets" },
+	{ ENETC_PM0_TDFR,   "MAC tx deferred packets" },
+	{ ENETC_PM0_TMCOL,  "MAC tx multiple collisions" },
+	{ ENETC_PM0_TSCOL,  "MAC tx single collisions" },
+	{ ENETC_PM0_TLCOL,  "MAC tx late collisions" },
+	{ ENETC_PM0_TECOL,  "MAC tx excessive collisions" },
+	{ ENETC_UFDMF,      "SI MAC nomatch u-cast discards" },
+	{ ENETC_MFDMF,      "SI MAC nomatch m-cast discards" },
+	{ ENETC_PBFDSIR,    "SI MAC nomatch b-cast discards" },
+	{ ENETC_PUFDVFR,    "SI VLAN nomatch u-cast discards" },
+	{ ENETC_PMFDVFR,    "SI VLAN nomatch m-cast discards" },
+	{ ENETC_PBFDVFR,    "SI VLAN nomatch b-cast discards" },
+	{ ENETC_PFDMSAPR,   "SI pruning discarded frames" },
+	{ ENETC_PICDR(0),   "ICM DR0 discarded frames" },
+	{ ENETC_PICDR(1),   "ICM DR1 discarded frames" },
+	{ ENETC_PICDR(2),   "ICM DR2 discarded frames" },
+	{ ENETC_PICDR(3),   "ICM DR3 discarded frames" },
+};
+
+static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
+	"Rx ring %2d frames",
+	"Rx ring %2d alloc errors",
+};
+
+static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
+	"Tx ring %2d frames",
+};
+
+static int enetc_get_sset_count(struct net_device *ndev, int sset)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(enetc_si_counters) +
+			ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+			ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
+			(enetc_si_is_pf(priv->si) ?
+			ARRAY_SIZE(enetc_port_counters) : 0);
+
+	return -EOPNOTSUPP;
+}
+
+static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	u8 *p = data;
+	int i, j;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
+			strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < priv->num_tx_rings; i++) {
+			for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
+				snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
+					 i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		for (i = 0; i < priv->num_rx_rings; i++) {
+			for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
+				snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
+					 i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+
+		if (!enetc_si_is_pf(priv->si))
+			break;
+
+		for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
+			strlcpy(p, enetc_port_counters[i].name,
+				ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static void enetc_get_ethtool_stats(struct net_device *ndev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	int i, o = 0;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+		data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		data[o++] = priv->tx_ring[i]->stats.packets;
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		data[o++] = priv->rx_ring[i]->stats.packets;
+		data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+	}
+
+	if (!enetc_si_is_pf(priv->si))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+		data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+}
+
+#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
+			  RXH_IP_DST)
+#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+{
+	static const u32 rsshash[] = {
+			[TCP_V4_FLOW]    = ENETC_RSSHASH_L4,
+			[UDP_V4_FLOW]    = ENETC_RSSHASH_L4,
+			[SCTP_V4_FLOW]   = ENETC_RSSHASH_L4,
+			[AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3,
+			[IPV4_FLOW]      = ENETC_RSSHASH_L3,
+			[TCP_V6_FLOW]    = ENETC_RSSHASH_L4,
+			[UDP_V6_FLOW]    = ENETC_RSSHASH_L4,
+			[SCTP_V6_FLOW]   = ENETC_RSSHASH_L4,
+			[AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3,
+			[IPV6_FLOW]      = ENETC_RSSHASH_L3,
+			[ETHER_FLOW]     = 0,
+	};
+
+	if (rxnfc->flow_type >= ARRAY_SIZE(rsshash))
+		return -EINVAL;
+
+	rxnfc->data = rsshash[rxnfc->flow_type];
+
+	return 0;
+}
+
+/* current HW spec does byte reversal on everything including MAC addresses */
+static void ether_addr_copy_swap(u8 *dst, const u8 *src)
+{
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		dst[i] = src[ETH_ALEN - i - 1];
+}
+
+static int enetc_set_cls_entry(struct enetc_si *si,
+			       struct ethtool_rx_flow_spec *fs, bool en)
+{
+	struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
+	struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m;
+	struct ethhdr *eth_h, *eth_m;
+	struct enetc_cmd_rfse rfse = { {0} };
+
+	if (!en)
+		goto done;
+
+	switch (fs->flow_type & 0xff) {
+	case TCP_V4_FLOW:
+		l4ip4_h = &fs->h_u.tcp_ip4_spec;
+		l4ip4_m = &fs->m_u.tcp_ip4_spec;
+		goto l4ip4;
+	case UDP_V4_FLOW:
+		l4ip4_h = &fs->h_u.udp_ip4_spec;
+		l4ip4_m = &fs->m_u.udp_ip4_spec;
+		goto l4ip4;
+	case SCTP_V4_FLOW:
+		l4ip4_h = &fs->h_u.sctp_ip4_spec;
+		l4ip4_m = &fs->m_u.sctp_ip4_spec;
+l4ip4:
+		rfse.sip_h[0] = l4ip4_h->ip4src;
+		rfse.sip_m[0] = l4ip4_m->ip4src;
+		rfse.dip_h[0] = l4ip4_h->ip4dst;
+		rfse.dip_m[0] = l4ip4_m->ip4dst;
+		rfse.sport_h = ntohs(l4ip4_h->psrc);
+		rfse.sport_m = ntohs(l4ip4_m->psrc);
+		rfse.dport_h = ntohs(l4ip4_h->pdst);
+		rfse.dport_m = ntohs(l4ip4_m->pdst);
+		if (l4ip4_m->tos)
+			netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+		rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+		rfse.ethtype_m = 0xffff;
+		break;
+	case IP_USER_FLOW:
+		l3ip4_h = &fs->h_u.usr_ip4_spec;
+		l3ip4_m = &fs->m_u.usr_ip4_spec;
+
+		rfse.sip_h[0] = l3ip4_h->ip4src;
+		rfse.sip_m[0] = l3ip4_m->ip4src;
+		rfse.dip_h[0] = l3ip4_h->ip4dst;
+		rfse.dip_m[0] = l3ip4_m->ip4dst;
+		if (l3ip4_m->tos)
+			netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+		rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+		rfse.ethtype_m = 0xffff;
+		break;
+	case ETHER_FLOW:
+		eth_h = &fs->h_u.ether_spec;
+		eth_m = &fs->m_u.ether_spec;
+
+		ether_addr_copy_swap(rfse.smac_h, eth_h->h_source);
+		ether_addr_copy_swap(rfse.smac_m, eth_m->h_source);
+		ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest);
+		ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest);
+		rfse.ethtype_h = ntohs(eth_h->h_proto);
+		rfse.ethtype_m = ntohs(eth_m->h_proto);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	rfse.mode |= ENETC_RFSE_EN;
+	if (fs->ring_cookie != RX_CLS_FLOW_DISC) {
+		rfse.mode |= ENETC_RFSE_MODE_BD;
+		rfse.result = fs->ring_cookie;
+	}
+done:
+	return enetc_set_fs_entry(si, &rfse, fs->location);
+}
+
+static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+			   u32 *rule_locs)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int i, j;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_GRXRINGS:
+		rxnfc->data = priv->num_rx_rings;
+		break;
+	case ETHTOOL_GRXFH:
+		/* get RSS hash config */
+		return enetc_get_rsshash(rxnfc);
+	case ETHTOOL_GRXCLSRLCNT:
+		/* total number of entries */
+		rxnfc->data = priv->si->num_fs_entries;
+		/* number of entries in use */
+		rxnfc->rule_cnt = 0;
+		for (i = 0; i < priv->si->num_fs_entries; i++)
+			if (priv->cls_rules[i].used)
+				rxnfc->rule_cnt++;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		if (rxnfc->fs.location >= priv->si->num_fs_entries)
+			return -EINVAL;
+
+		/* get entry x */
+		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		/* total number of entries */
+		rxnfc->data = priv->si->num_fs_entries;
+		/* array of indexes of used entries */
+		j = 0;
+		for (i = 0; i < priv->si->num_fs_entries; i++) {
+			if (!priv->cls_rules[i].used)
+				continue;
+			if (j == rxnfc->rule_cnt)
+				return -EMSGSIZE;
+			rule_locs[j++] = i;
+		}
+		/* number of entries in use */
+		rxnfc->rule_cnt = j;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int err;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		if (rxnfc->fs.location >= priv->si->num_fs_entries)
+			return -EINVAL;
+
+		if (rxnfc->fs.ring_cookie >= priv->num_rx_rings &&
+		    rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC)
+			return -EINVAL;
+
+		err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true);
+		if (err)
+			return err;
+		priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs;
+		priv->cls_rules[rxnfc->fs.location].used = 1;
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		if (rxnfc->fs.location >= priv->si->num_fs_entries)
+			return -EINVAL;
+
+		err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false);
+		if (err)
+			return err;
+		priv->cls_rules[rxnfc->fs.location].used = 0;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static u32 enetc_get_rxfh_key_size(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	/* return the size of the RX flow hash key.  PF only */
+	return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0;
+}
+
+static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	/* return the size of the RX flow hash indirection table */
+	return priv->si->num_rss;
+}
+
+static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	int err = 0, i;
+
+	/* return hash function */
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	/* return hash key */
+	if (key && hw->port)
+		for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+			((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+
+	/* return RSS table */
+	if (indir)
+		err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+
+	return err;
+}
+
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+{
+	int i;
+
+	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+		enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+}
+
+static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	int err = 0;
+
+	/* set hash key, if PF */
+	if (key && hw->port)
+		enetc_set_rss_key(hw, key);
+
+	/* set RSS table */
+	if (indir)
+		err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+
+	return err;
+}
+
+static void enetc_get_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ring)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	ring->rx_pending = priv->rx_bd_count;
+	ring->tx_pending = priv->tx_bd_count;
+
+	/* do some h/w sanity checks for BDR length */
+	if (netif_running(ndev)) {
+		struct enetc_hw *hw = &priv->si->hw;
+		u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);
+
+		if (val != priv->rx_bd_count)
+			netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);
+
+		val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);
+
+		if (val != priv->tx_bd_count)
+			netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
+	}
+}
+
+static int enetc_get_ts_info(struct net_device *ndev,
+			     struct ethtool_ts_info *info)
+{
+	int *phc_idx;
+
+	phc_idx = symbol_get(enetc_phc_index);
+	if (phc_idx) {
+		info->phc_index = *phc_idx;
+		symbol_put(enetc_phc_index);
+	} else {
+		info->phc_index = -1;
+	}
+
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+			 (1 << HWTSTAMP_TX_ON);
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_ALL);
+#else
+	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE;
+#endif
+	return 0;
+}
+
+static const struct ethtool_ops enetc_pf_ethtool_ops = {
+	.get_regs_len = enetc_get_reglen,
+	.get_regs = enetc_get_regs,
+	.get_sset_count = enetc_get_sset_count,
+	.get_strings = enetc_get_strings,
+	.get_ethtool_stats = enetc_get_ethtool_stats,
+	.get_rxnfc = enetc_get_rxnfc,
+	.set_rxnfc = enetc_set_rxnfc,
+	.get_rxfh_key_size = enetc_get_rxfh_key_size,
+	.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+	.get_rxfh = enetc_get_rxfh,
+	.set_rxfh = enetc_set_rxfh,
+	.get_ringparam = enetc_get_ringparam,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+	.get_link = ethtool_op_get_link,
+	.get_ts_info = enetc_get_ts_info,
+};
+
+static const struct ethtool_ops enetc_vf_ethtool_ops = {
+	.get_regs_len = enetc_get_reglen,
+	.get_regs = enetc_get_regs,
+	.get_sset_count = enetc_get_sset_count,
+	.get_strings = enetc_get_strings,
+	.get_ethtool_stats = enetc_get_ethtool_stats,
+	.get_rxnfc = enetc_get_rxnfc,
+	.set_rxnfc = enetc_set_rxnfc,
+	.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+	.get_rxfh = enetc_get_rxfh,
+	.set_rxfh = enetc_set_rxfh,
+	.get_ringparam = enetc_get_ringparam,
+	.get_link = ethtool_op_get_link,
+	.get_ts_info = enetc_get_ts_info,
+};
+
+void enetc_set_ethtool_ops(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	if (enetc_si_is_pf(priv->si))
+		ndev->ethtool_ops = &enetc_pf_ethtool_ops;
+	else
+		ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
new file mode 100644
index 0000000..8827629
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/bitops.h>
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_PF		0xe100
+#define ENETC_DEV_ID_VF		0xef00
+#define ENETC_DEV_ID_PTP	0xee02
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS	0
+
+/** SI regs, offset: 0h */
+#define ENETC_SIMR	0
+#define ENETC_SIMR_EN	BIT(31)
+#define ENETC_SIMR_RSSE	BIT(0)
+#define ENETC_SICTR0	0x18
+#define ENETC_SICTR1	0x1c
+#define ENETC_SIPCAPR0	0x20
+#define ENETC_SIPCAPR0_RSS	BIT(8)
+#define ENETC_SIPCAPR1	0x24
+#define ENETC_SITGTGR	0x30
+#define ENETC_SIRBGCR	0x38
+/* cache attribute registers for transactions initiated by ENETC */
+#define ENETC_SICAR0	0x40
+#define ENETC_SICAR1	0x44
+#define ENETC_SICAR2	0x48
+/* rd snoop, no alloc
+ * wr snoop, no alloc, partial cache line update for BDs and full cache line
+ * update for data
+ */
+#define ENETC_SICAR_RD_COHERENT	0x2b2b0000
+#define ENETC_SICAR_WR_COHERENT	0x00006727
+#define ENETC_SICAR_MSI	0x00300030 /* rd/wr device, no snoop, no alloc */
+
+#define ENETC_SIPMAR0	0x80
+#define ENETC_SIPMAR1	0x84
+
+/* VF-PF Message passing */
+#define ENETC_DEFAULT_MSG_SIZE	1024	/* and max size */
+/* msg size encoding: default and max msg value of 1024B encoded as 0 */
+static inline u32 enetc_vsi_set_msize(u32 size)
+{
+	return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0;
+}
+
+#define ENETC_PSIMSGRR	0x204
+#define ENETC_PSIMSGRR_MR_MASK	GENMASK(2, 1)
+#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR0(n)	(0x210 + (n) * 0x8) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR1(n)	(0x214 + (n) * 0x8)
+
+#define ENETC_VSIMSGSR	0x204	/* RO */
+#define ENETC_VSIMSGSR_MB	BIT(0)
+#define ENETC_VSIMSGSR_MS	BIT(1)
+#define ENETC_VSIMSGSNDAR0	0x210
+#define ENETC_VSIMSGSNDAR1	0x214
+
+#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16)
+#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16)
+
+/* SI statistics */
+#define ENETC_SIROCT	0x300
+#define ENETC_SIRFRM	0x308
+#define ENETC_SIRUCA	0x310
+#define ENETC_SIRMCA	0x318
+#define ENETC_SITOCT	0x320
+#define ENETC_SITFRM	0x328
+#define ENETC_SITUCA	0x330
+#define ENETC_SITMCA	0x338
+#define ENETC_RBDCR(n)	(0x8180 + (n) * 0x200)
+
+/* Control BDR regs */
+#define ENETC_SICBDRMR		0x800
+#define ENETC_SICBDRSR		0x804	/* RO */
+#define ENETC_SICBDRBAR0	0x810
+#define ENETC_SICBDRBAR1	0x814
+#define ENETC_SICBDRPIR		0x818
+#define ENETC_SICBDRCIR		0x81c
+#define ENETC_SICBDRLENR	0x820
+
+#define ENETC_SICAPR0	0x900
+#define ENETC_SICAPR1	0x904
+
+#define ENETC_PSIIER	0xa00
+#define ENETC_PSIIER_MR_MASK	GENMASK(2, 1)
+#define ENETC_PSIIDR	0xa08
+#define ENETC_SITXIDR	0xa18
+#define ENETC_SIRXIDR	0xa28
+#define ENETC_SIMSIVR	0xa30
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SIUEFDCR	0xe28
+
+#define ENETC_SIRFSCAPR	0x1200
+#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f)
+#define ENETC_SIRSSCAPR	0x1600
+#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+
+/** SI BDR sub-blocks, n = 0..7 */
+enum enetc_bdr_type {TX, RX};
+#define ENETC_BDR_OFF(i)	((i) * 0x200)
+#define ENETC_BDR(t, i, r)	(0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
+/* RX BDR reg offsets */
+#define ENETC_RBMR	0
+#define ENETC_RBMR_BDS	BIT(2)
+#define ENETC_RBMR_VTE	BIT(5)
+#define ENETC_RBMR_EN	BIT(31)
+#define ENETC_RBSR	0x4
+#define ENETC_RBBSR	0x8
+#define ENETC_RBCIR	0xc
+#define ENETC_RBBAR0	0x10
+#define ENETC_RBBAR1	0x14
+#define ENETC_RBPIR	0x18
+#define ENETC_RBLENR	0x20
+#define ENETC_RBIER	0xa0
+#define ENETC_RBIER_RXTIE	BIT(0)
+#define ENETC_RBIDR	0xa4
+#define ENETC_RBICIR0	0xa8
+#define ENETC_RBICIR0_ICEN	BIT(31)
+
+/* TX BDR reg offsets */
+#define ENETC_TBMR	0
+#define ENETC_TBSR_BUSY	BIT(0)
+#define ENETC_TBMR_VIH	BIT(9)
+#define ENETC_TBMR_PRIO_MASK		GENMASK(2, 0)
+#define ENETC_TBMR_SET_PRIO(val)	((val) & ENETC_TBMR_PRIO_MASK)
+#define ENETC_TBMR_EN	BIT(31)
+#define ENETC_TBSR	0x4
+#define ENETC_TBBAR0	0x10
+#define ENETC_TBBAR1	0x14
+#define ENETC_TBPIR	0x18
+#define ENETC_TBCIR	0x1c
+#define ENETC_TBCIR_IDX_MASK	0xffff
+#define ENETC_TBLENR	0x20
+#define ENETC_TBIER	0xa0
+#define ENETC_TBIER_TXTIE	BIT(0)
+#define ENETC_TBIDR	0xa4
+#define ENETC_TBICIR0	0xa8
+#define ENETC_TBICIR0_ICEN	BIT(31)
+
+#define ENETC_RTBLENR_LEN(n)	((n) & ~0x7)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE		0x10000
+#define ENETC_PMR		0x0000
+#define ENETC_PMR_EN	GENMASK(18, 16)
+#define ENETC_PSR		0x0004 /* RO */
+#define ENETC_PSIPMR		0x0018
+#define ENETC_PSIPMR_SET_UP(n)	BIT(n) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n)	BIT((n) + 16)
+#define ENETC_PSIPVMR		0x001c
+#define ENETC_VLAN_PROMISC_MAP_ALL	0x7
+#define ENETC_PSIPVMR_SET_VP(simap)	((simap) & 0x7)
+#define ENETC_PSIPVMR_SET_VUTA(simap)	(((simap) & 0x7) << 16)
+#define ENETC_PSIPMAR0(n)	(0x0100 + (n) * 0x8) /* n = SI index */
+#define ENETC_PSIPMAR1(n)	(0x0104 + (n) * 0x8)
+#define ENETC_PVCLCTR		0x0208
+#define ENETC_VLAN_TYPE_C	BIT(0)
+#define ENETC_VLAN_TYPE_S	BIT(1)
+#define ENETC_PVCLCTR_OVTPIDL(bmp)	((bmp) & 0xff) /* VLAN_TYPE */
+#define ENETC_PSIVLANR(n)	(0x0240 + (n) * 4) /* n = SI index */
+#define ENETC_PSIVLAN_EN	BIT(31)
+#define ENETC_PSIVLAN_SET_QOS(val)	((u32)(val) << 12)
+#define ENETC_PTXMBAR		0x0608
+#define ENETC_PCAPR0		0x0900
+#define ENETC_PCAPR0_RXBDR(val)	((val) >> 24)
+#define ENETC_PCAPR0_TXBDR(val)	(((val) >> 16) & 0xff)
+#define ENETC_PCAPR1		0x0904
+#define ENETC_PSICFGR0(n)	(0x0940 + (n) * 0xc)  /* n = SI index */
+#define ENETC_PSICFGR0_SET_TXBDR(val)	((val) & 0xff)
+#define ENETC_PSICFGR0_SET_RXBDR(val)	(((val) & 0xff) << 16)
+#define ENETC_PSICFGR0_VTE	BIT(12)
+#define ENETC_PSICFGR0_SIVIE	BIT(14)
+#define ENETC_PSICFGR0_ASE	BIT(15)
+#define ENETC_PSICFGR0_SIVC(bmp)	(((bmp) & 0xff) << 24) /* VLAN_TYPE */
+
+#define ENETC_PTCCBSR0(n)	(0x1110 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_PTCCBSR1(n)	(0x1114 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_RSSHASH_KEY_SIZE	40
+#define ENETC_PRSSK(n)		(0x1410 + (n) * 4) /* n = [0..9] */
+#define ENETC_PSIVLANFMR	0x1700
+#define ENETC_PSIVLANFMR_VS	BIT(0)
+#define ENETC_PRFSMR		0x1800
+#define ENETC_PRFSMR_RFSE	BIT(31)
+#define ENETC_PRFSCAPR		0x1804
+#define ENETC_PRFSCAPR_GET_NUM_RFS(val)	((((val) & 0xf) + 1) * 16)
+#define ENETC_PSIRFSCFGR(n)	(0x1814 + (n) * 4) /* n = SI index */
+#define ENETC_PFPMR		0x1900
+#define ENETC_PFPMR_PMACE	BIT(1)
+#define ENETC_PFPMR_MWLM	BIT(0)
+#define ENETC_PSIUMHFR0(n, err)	(((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
+#define ENETC_PSIUMHFR1(n)	(0x1d04 + (n) * 0x10)
+#define ENETC_PSIMMHFR0(n, err)	(((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
+#define ENETC_PSIMMHFR1(n)	(0x1d0c + (n) * 0x10)
+#define ENETC_PSIVHFR0(n)	(0x1e00 + (n) * 8) /* n = SI index */
+#define ENETC_PSIVHFR1(n)	(0x1e04 + (n) * 8) /* n = SI index */
+#define ENETC_MMCSR		0x1f00
+#define ENETC_MMCSR_ME		BIT(16)
+#define ENETC_PTCMSDUR(n)	(0x2020 + (n) * 4) /* n = TC index [0..7] */
+
+#define ENETC_PM0_CMD_CFG	0x8008
+#define ENETC_PM1_CMD_CFG	0x9008
+#define ENETC_PM0_TX_EN		BIT(0)
+#define ENETC_PM0_RX_EN		BIT(1)
+#define ENETC_PM0_PROMISC	BIT(4)
+#define ENETC_PM0_CMD_XGLP	BIT(10)
+#define ENETC_PM0_CMD_TXP	BIT(11)
+#define ENETC_PM0_CMD_PHY_TX_EN	BIT(15)
+#define ENETC_PM0_CMD_SFD	BIT(21)
+#define ENETC_PM0_MAXFRM	0x8014
+#define ENETC_SET_TX_MTU(val)	((val) << 16)
+#define ENETC_SET_MAXFRM(val)	((val) & 0xffff)
+#define ENETC_PM0_IF_MODE	0x8300
+#define ENETC_PMO_IFM_RG	BIT(2)
+#define ENETC_PM0_IFM_RLP	(BIT(5) | BIT(11))
+#define ENETC_PM0_IFM_RGAUTO	(BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
+#define ENETC_PM0_IFM_XGMII	BIT(12)
+
+/* MAC counters */
+#define ENETC_PM0_REOCT		0x8100
+#define ENETC_PM0_RALN		0x8110
+#define ENETC_PM0_RXPF		0x8118
+#define ENETC_PM0_RFRM		0x8120
+#define ENETC_PM0_RFCS		0x8128
+#define ENETC_PM0_RVLAN		0x8130
+#define ENETC_PM0_RERR		0x8138
+#define ENETC_PM0_RUCA		0x8140
+#define ENETC_PM0_RMCA		0x8148
+#define ENETC_PM0_RBCA		0x8150
+#define ENETC_PM0_RDRP		0x8158
+#define ENETC_PM0_RPKT		0x8160
+#define ENETC_PM0_RUND		0x8168
+#define ENETC_PM0_R64		0x8170
+#define ENETC_PM0_R127		0x8178
+#define ENETC_PM0_R255		0x8180
+#define ENETC_PM0_R511		0x8188
+#define ENETC_PM0_R1023		0x8190
+#define ENETC_PM0_R1518		0x8198
+#define ENETC_PM0_R1519X	0x81A0
+#define ENETC_PM0_ROVR		0x81A8
+#define ENETC_PM0_RJBR		0x81B0
+#define ENETC_PM0_RFRG		0x81B8
+#define ENETC_PM0_RCNP		0x81C0
+#define ENETC_PM0_RDRNTP	0x81C8
+#define ENETC_PM0_TEOCT		0x8200
+#define ENETC_PM0_TOCT		0x8208
+#define ENETC_PM0_TCRSE		0x8210
+#define ENETC_PM0_TXPF		0x8218
+#define ENETC_PM0_TFRM		0x8220
+#define ENETC_PM0_TFCS		0x8228
+#define ENETC_PM0_TVLAN		0x8230
+#define ENETC_PM0_TERR		0x8238
+#define ENETC_PM0_TUCA		0x8240
+#define ENETC_PM0_TMCA		0x8248
+#define ENETC_PM0_TBCA		0x8250
+#define ENETC_PM0_TPKT		0x8260
+#define ENETC_PM0_TUND		0x8268
+#define ENETC_PM0_T127		0x8278
+#define ENETC_PM0_T1023		0x8290
+#define ENETC_PM0_T1518		0x8298
+#define ENETC_PM0_TCNP		0x82C0
+#define ENETC_PM0_TDFR		0x82D0
+#define ENETC_PM0_TMCOL		0x82D8
+#define ENETC_PM0_TSCOL		0x82E0
+#define ENETC_PM0_TLCOL		0x82E8
+#define ENETC_PM0_TECOL		0x82F0
+
+/* Port counters */
+#define ENETC_PICDR(n)		(0x0700 + (n) * 8) /* n = [0..3] */
+#define ENETC_PBFDSIR		0x0810
+#define ENETC_PFDMSAPR		0x0814
+#define ENETC_UFDMF		0x1680
+#define ENETC_MFDMF		0x1684
+#define ENETC_PUFDVFR		0x1780
+#define ENETC_PMFDVFR		0x1784
+#define ENETC_PBFDVFR		0x1788
+
+/** Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE	0x20000
+#define ENETC_G_EIPBRR0		0x0bf8
+#define ENETC_G_EIPBRR1		0x0bfc
+#define ENETC_G_EPFBLPR(n)	(0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII	0x80000000
+
+/* PCI device info */
+struct enetc_hw {
+	/* SI registers, used by all PCI functions */
+	void __iomem *reg;
+	/* Port registers, PF only */
+	void __iomem *port;
+	/* IP global registers, PF only */
+	void __iomem *global;
+};
+
+/* general register accessors */
+#define enetc_rd_reg(reg)	ioread32((reg))
+#define enetc_wr_reg(reg, val)	iowrite32((val), (reg))
+#ifdef ioread64
+#define enetc_rd_reg64(reg)	ioread64((reg))
+#else
+/* using this to read out stats on 32b systems */
+static inline u64 enetc_rd_reg64(void __iomem *reg)
+{
+	u32 low, high, tmp;
+
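+	/* re-read the high word and retry if it changed, so a low-word
+	 * rollover between the two 32-bit reads cannot yield a torn value
+	 */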
+	do {
+		high = ioread32(reg + 4);
+		low = ioread32(reg);
+		tmp = ioread32(reg + 4);
+	} while (high != tmp);
+
+	return le64_to_cpu((__le64)high << 32 | low);
+}
+#endif
+
+#define enetc_rd(hw, off)		enetc_rd_reg((hw)->reg + (off))
+#define enetc_wr(hw, off, val)		enetc_wr_reg((hw)->reg + (off), val)
+#define enetc_rd64(hw, off)		enetc_rd_reg64((hw)->reg + (off))
+/* port register accessors - PF only */
+#define enetc_port_rd(hw, off)		enetc_rd_reg((hw)->port + (off))
+#define enetc_port_wr(hw, off, val)	enetc_wr_reg((hw)->port + (off), val)
+/* global register accessors - PF only */
+#define enetc_global_rd(hw, off)	enetc_rd_reg((hw)->global + (off))
+#define enetc_global_wr(hw, off, val)	enetc_wr_reg((hw)->global + (off), val)
+/* BDR register accessors, see ENETC_BDR() */
+#define enetc_bdr_rd(hw, t, n, off) \
+				enetc_rd(hw, ENETC_BDR(t, n, off))
+#define enetc_bdr_wr(hw, t, n, off, val) \
+				enetc_wr(hw, ENETC_BDR(t, n, off), val)
+#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
+#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
+#define enetc_txbdr_wr(hw, n, off, val) \
+				enetc_bdr_wr(hw, TX, n, off, val)
+#define enetc_rxbdr_wr(hw, n, off, val) \
+				enetc_bdr_wr(hw, RX, n, off, val)
+
+/* Buffer Descriptors (BD) */
+union enetc_tx_bd {
+	struct {
+		__le64 addr;
+		__le16 buf_len;
+		__le16 frm_len;
+		union {
+			struct {
+				__le16 l3_csoff;
+				u8 l4_csoff;
+				u8 flags;
+			}; /* default layout */
+			__le32 lstatus;
+		};
+	};
+	struct {
+		__le32 tstamp;
+		__le16 tpid;
+		__le16 vid;
+		u8 reserved[6];
+		u8 e_flags;
+		u8 flags;
+	} ext; /* Tx BD extension */
+	struct {
+		__le32 tstamp;
+		u8 reserved[10];
+		u8 status;
+		u8 flags;
+	} wb; /* writeback descriptor */
+};
+
+#define ENETC_TXBD_FLAGS_L4CS	BIT(0)
+#define ENETC_TXBD_FLAGS_W	BIT(2)
+#define ENETC_TXBD_FLAGS_CSUM	BIT(3)
+#define ENETC_TXBD_FLAGS_EX	BIT(6)
+#define ENETC_TXBD_FLAGS_F	BIT(7)
+
+static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
+{
+	memset(txbd, 0, sizeof(*txbd));
+}
+
+/* L3 csum flags */
+#define ENETC_TXBD_L3_IPCS	BIT(7)
+#define ENETC_TXBD_L3_IPV6	BIT(15)
+
+#define ENETC_TXBD_L3_START_MASK	GENMASK(6, 0)
+#define ENETC_TXBD_L3_SET_HSIZE(val)	((((val) >> 2) & 0x7f) << 8)
+
+/* Extension flags */
+#define ENETC_TXBD_E_FLAGS_VLAN_INS	BIT(0)
+#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP	BIT(2)
+
+static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
+{
+	return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
+			   (start & ENETC_TXBD_L3_START_MASK));
+}
+
+/* L4 csum flags */
+#define ENETC_TXBD_L4_UDP	BIT(5)
+#define ENETC_TXBD_L4_TCP	BIT(6)
+
+union enetc_rx_bd {
+	struct {
+		__le64 addr;
+		u8 reserved[8];
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+		u8 reserved1[16];
+#endif
+	} w;
+	struct {
+		__le16 inet_csum;
+		__le16 parse_summary;
+		__le32 rss_hash;
+		__le16 buf_len;
+		__le16 vlan_opt;
+		union {
+			struct {
+				__le16 flags;
+				__le16 error;
+			};
+			__le32 lstatus;
+		};
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+		__le32 tstamp;
+		u8 reserved[12];
+#endif
+	} r;
+};
+
+#define ENETC_RXBD_LSTATUS_R	BIT(30)
+#define ENETC_RXBD_LSTATUS_F	BIT(31)
+#define ENETC_RXBD_ERR_MASK	0xff
+#define ENETC_RXBD_LSTATUS(flags)	((flags) << 16)
+#define ENETC_RXBD_FLAG_VLAN	BIT(9)
+#define ENETC_RXBD_FLAG_TSTMP	BIT(10)
+
+#define ENETC_MAC_ADDR_FILT_CNT	8 /* # of supported entries per port */
+#define EMETC_MAC_ADDR_FILT_RES	3 /* # of reserved entries at the beginning */
+#define ENETC_MAX_NUM_VFS	2
+
+struct enetc_cbd {
+	union {
+		struct {
+			__le32 addr[2];
+			__le32 opt[4];
+		};
+		__le32 data[6];
+	};
+	__le16 index;
+	__le16 length;
+	u8 cmd;
+	u8 cls;
+	u8 _res;
+	u8 status_flags;
+};
+
+#define ENETC_CBD_FLAGS_SF	BIT(7) /* short format */
+#define ENETC_CBD_STATUS_MASK	0xf
+
+struct enetc_cmd_rfse {
+	u8 smac_h[6];
+	u8 smac_m[6];
+	u8 dmac_h[6];
+	u8 dmac_m[6];
+	u32 sip_h[4];
+	u32 sip_m[4];
+	u32 dip_h[4];
+	u32 dip_m[4];
+	u16 ethtype_h;
+	u16 ethtype_m;
+	u16 ethtype4_h;
+	u16 ethtype4_m;
+	u16 sport_h;
+	u16 sport_m;
+	u16 dport_h;
+	u16 dport_m;
+	u16 vlan_h;
+	u16 vlan_m;
+	u8 proto_h;
+	u8 proto_m;
+	u16 flags;
+	u16 result;
+	u16 mode;
+};
+
+#define ENETC_RFSE_EN	BIT(15)
+#define ENETC_RFSE_MODE_BD	2
+
+static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+{
+	*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
+	*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+}
+
+#define ENETC_SI_INT_IDX	0
+/* base index for Rx/Tx interrupts */
+#define ENETC_BDR_INT_BASE_IDX	1
+
+/* Messaging */
+
+/* Command completion status */
+enum enetc_msg_cmd_status {
+	ENETC_MSG_CMD_STATUS_OK,
+	ENETC_MSG_CMD_STATUS_FAIL
+};
+
+/* VSI-PSI command message types */
+enum enetc_msg_cmd_type {
+	ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */
+	ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */
+	ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */
+};
+
+/* VSI-PSI command action types */
+enum enetc_msg_cmd_action_type {
+	ENETC_MSG_CMD_MNG_ADD = 1,
+	ENETC_MSG_CMD_MNG_REMOVE
+};
+
+/* PSI-VSI command header format */
+struct enetc_msg_cmd_header {
+	u16 type;	/* command class type */
+	u16 id;		/* denotes the specific required action */
+};
+
+/* Common H/W utility functions */
+
+static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
+				       bool en)
+{
+	u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+
+	val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
+	enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+}
+
+static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
+				       bool en)
+{
+	u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+
+	val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
+	enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+}
+
+static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
+				      int prio)
+{
+	u32 val = enetc_txbdr_rd(hw, bdr_idx, ENETC_TBMR);
+
+	val &= ~ENETC_TBMR_PRIO_MASK;
+	val |= ENETC_TBMR_SET_PRIO(prio);
+	enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
new file mode 100644
index 0000000..149883c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+
+#include "enetc_mdio.h"
+
+#define	ENETC_MDIO_REG_OFFSET	0x1c00
+#define	ENETC_MDIO_CFG	0x0	/* MDIO configuration and status */
+#define	ENETC_MDIO_CTL	0x4	/* MDIO control */
+#define	ENETC_MDIO_DATA	0x8	/* MDIO data */
+#define	ENETC_MDIO_ADDR	0xc	/* MDIO address */
+
+#define enetc_mdio_rd(hw, off) \
+	enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
+#define enetc_mdio_wr(hw, off, val) \
+	enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
+#define enetc_mdio_rd_reg(off)	enetc_mdio_rd(hw, off)
+
+#define ENETC_MDC_DIV		258
+
+#define MDIO_CFG_CLKDIV(x)	((((x) >> 1) & 0xff) << 8)
+#define MDIO_CFG_BSY		BIT(0)
+#define MDIO_CFG_RD_ER		BIT(1)
+#define MDIO_CFG_ENC45		BIT(6)
+ /* external MDIO only - driven on neg MDC edge */
+#define MDIO_CFG_NEG		BIT(23)
+
+#define MDIO_CTL_DEV_ADDR(x)	((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)	(((x) & 0x1f) << 5)
+#define MDIO_CTL_READ		BIT(15)
+#define MDIO_DATA(x)		((x) & 0xffff)
+
+#define TIMEOUT	1000
+static int enetc_mdio_wait_complete(struct enetc_hw *hw)
+{
+	u32 val;
+
+	return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
+				  !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+}
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+	struct enetc_mdio_priv *mdio_priv = bus->priv;
+	struct enetc_hw *hw = mdio_priv->hw;
+	u32 mdio_ctl, mdio_cfg;
+	u16 dev_addr;
+	int ret;
+
+	mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+	if (regnum & MII_ADDR_C45) {
+		dev_addr = (regnum >> 16) & 0x1f;
+		mdio_cfg |= MDIO_CFG_ENC45;
+	} else {
+		/* clause 22 (ie 1G) */
+		dev_addr = regnum & 0x1f;
+		mdio_cfg &= ~MDIO_CFG_ENC45;
+	}
+
+	enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+
+	ret = enetc_mdio_wait_complete(hw);
+	if (ret)
+		return ret;
+
+	/* set port and dev addr */
+	mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+	enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+
+	/* set the register address */
+	if (regnum & MII_ADDR_C45) {
+		enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+
+		ret = enetc_mdio_wait_complete(hw);
+		if (ret)
+			return ret;
+	}
+
+	/* write the value */
+	enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
+
+	ret = enetc_mdio_wait_complete(hw);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+	struct enetc_mdio_priv *mdio_priv = bus->priv;
+	struct enetc_hw *hw = mdio_priv->hw;
+	u32 mdio_ctl, mdio_cfg;
+	u16 dev_addr, value;
+	int ret;
+
+	mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+	if (regnum & MII_ADDR_C45) {
+		dev_addr = (regnum >> 16) & 0x1f;
+		mdio_cfg |= MDIO_CFG_ENC45;
+	} else {
+		dev_addr = regnum & 0x1f;
+		mdio_cfg &= ~MDIO_CFG_ENC45;
+	}
+
+	enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+
+	ret = enetc_mdio_wait_complete(hw);
+	if (ret)
+		return ret;
+
+	/* set port and device addr */
+	mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+	enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+
+	/* set the register address */
+	if (regnum & MII_ADDR_C45) {
+		enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+
+		ret = enetc_mdio_wait_complete(hw);
+		if (ret)
+			return ret;
+	}
+
+	/* initiate the read */
+	enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+
+	ret = enetc_mdio_wait_complete(hw);
+	if (ret)
+		return ret;
+
+	/* return all Fs if nothing was there */
+	if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
+		dev_dbg(&bus->dev,
+			"Error while reading PHY%d reg at %d.%04x\n",
+			phy_id, dev_addr, regnum & 0xffff);
+		return 0xffff;
+	}
+
+	value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
+
+	return value;
+}
+
+int enetc_mdio_probe(struct enetc_pf *pf)
+{
+	struct device *dev = &pf->si->pdev->dev;
+	struct enetc_mdio_priv *mdio_priv;
+	struct device_node *np;
+	struct mii_bus *bus;
+	int err;
+
+	bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = "Freescale ENETC MDIO Bus";
+	bus->read = enetc_mdio_read;
+	bus->write = enetc_mdio_write;
+	bus->parent = dev;
+	mdio_priv = bus->priv;
+	mdio_priv->hw = &pf->si->hw;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+	np = of_get_child_by_name(dev->of_node, "mdio");
+	if (!np) {
+		dev_err(dev, "MDIO node missing\n");
+		return -EINVAL;
+	}
+
+	err = of_mdiobus_register(bus, np);
+	if (err) {
+		of_node_put(np);
+		dev_err(dev, "cannot register MDIO bus\n");
+		return err;
+	}
+
+	of_node_put(np);
+	pf->mdio = bus;
+
+	return 0;
+}
+
+void enetc_mdio_remove(struct enetc_pf *pf)
+{
+	if (pf->mdio)
+		mdiobus_unregister(pf->mdio);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
new file mode 100644
index 0000000..60c9a38
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#include <linux/phy.h>
+#include "enetc_pf.h"
+
+struct enetc_mdio_priv {
+	struct enetc_hw *hw;
+};
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
new file mode 100644
index 0000000..40d22eb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc_pf.h"
+
+static void enetc_msg_disable_mr_int(struct enetc_hw *hw)
+{
+	u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+	/* disable MR int source(s) */
+	enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK);
+}
+
+static void enetc_msg_enable_mr_int(struct enetc_hw *hw)
+{
+	u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+
+	enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK);
+}
+
+static irqreturn_t enetc_msg_psi_msix(int irq, void *data)
+{
+	struct enetc_si *si = (struct enetc_si *)data;
+	struct enetc_pf *pf = enetc_si_priv(si);
+
+	enetc_msg_disable_mr_int(&si->hw);
+	schedule_work(&pf->msg_task);
+
+	return IRQ_HANDLED;
+}
+
+static void enetc_msg_task(struct work_struct *work)
+{
+	struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task);
+	struct enetc_hw *hw = &pf->si->hw;
+	unsigned long mr_mask;
+	int i;
+
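+	/* service pending VSI->PSI message requests until none are left,
+	 * then re-arm the message-received interrupt
+	 */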
+	for (;;) {
+		mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK;
+		if (!mr_mask) {
+			/* re-arm MR interrupts, w1c the IDR reg */
+			enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK);
+			enetc_msg_enable_mr_int(hw);
+			return;
+		}
+
+		for (i = 0; i < pf->num_vfs; i++) {
+			u32 psimsgrr;
+			u16 msg_code;
+
+			if (!(ENETC_PSIMSGRR_MR(i) & mr_mask))
+				continue;
+
+			enetc_msg_handle_rxmsg(pf, i, &msg_code);
+
+			psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code);
+			psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */
+			enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr);
+		}
+	}
+}
+
+/* Init */
+static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx)
+{
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct device *dev = &si->pdev->dev;
+	struct enetc_hw *hw = &si->hw;
+	struct enetc_msg_swbd *msg;
+	u32 val;
+
+	msg = &pf->rxmsg[idx];
+	/* allocate and set receive buffer */
+	msg->size = ENETC_DEFAULT_MSG_SIZE;
+
+	msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma,
+					GFP_KERNEL);
+	if (!msg->vaddr) {
+		dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n",
+			msg->size);
+		return -ENOMEM;
+	}
+
+	/* set multiple of 32 bytes */
+	val = lower_32_bits(msg->dma);
+	enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val);
+	val = upper_32_bits(msg->dma);
+	enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val);
+
+	return 0;
+}
+
+static void enetc_msg_free_mbx(struct enetc_si *si, int idx)
+{
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_hw *hw = &si->hw;
+	struct enetc_msg_swbd *msg;
+
+	msg = &pf->rxmsg[idx];
+	dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma);
+	memset(msg, 0, sizeof(*msg));
+
+	enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0);
+	enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0);
+}
+
+int enetc_msg_psi_init(struct enetc_pf *pf)
+{
+	struct enetc_si *si = pf->si;
+	int vector, i, err;
+
+	/* register message passing interrupt handler */
+	snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg",
+		 si->ndev->name);
+	vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX);
+	err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si);
+	if (err) {
+		dev_err(&si->pdev->dev,
+			"PSI messaging: request_irq() failed!\n");
+		return err;
+	}
+
+	/* set one IRQ entry for PSI message receive notification (SI int) */
+	enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX);
+
+	/* initialize PSI mailbox */
+	INIT_WORK(&pf->msg_task, enetc_msg_task);
+
+	for (i = 0; i < pf->num_vfs; i++) {
+		err = enetc_msg_alloc_mbx(si, i);
+		if (err)
+			goto err_init_mbx;
+	}
+
+	/* enable MR interrupts */
+	enetc_msg_enable_mr_int(&si->hw);
+
+	return 0;
+
+err_init_mbx:
+	for (i--; i >= 0; i--)
+		enetc_msg_free_mbx(si, i);
+
+	free_irq(vector, si);
+
+	return err;
+}
+
+void enetc_msg_psi_free(struct enetc_pf *pf)
+{
+	struct enetc_si *si = pf->si;
+	int i;
+
+	cancel_work_sync(&pf->msg_task);
+
+	/* disable MR interrupts */
+	enetc_msg_disable_mr_int(&si->hw);
+
+	for (i = 0; i < pf->num_vfs; i++)
+		enetc_msg_free_mbx(si, i);
+
+	/* de-register message passing interrupt handler */
+	free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
new file mode 100644
index 0000000..fbd41ce
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+#include <linux/of_mdio.h>
+#include "enetc_mdio.h"
+
+#define ENETC_MDIO_DEV_ID	0xee01
+#define ENETC_MDIO_DEV_NAME	"FSL PCIe IE Central MDIO"
+#define ENETC_MDIO_BUS_NAME	ENETC_MDIO_DEV_NAME " Bus"
+#define ENETC_MDIO_DRV_NAME	ENETC_MDIO_DEV_NAME " driver"
+
+static int enetc_pci_mdio_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct enetc_mdio_priv *mdio_priv;
+	struct device *dev = &pdev->dev;
+	struct enetc_hw *hw;
+	struct mii_bus *bus;
+	int err;
+
+	hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;
+
+	bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = ENETC_MDIO_BUS_NAME;
+	bus->read = enetc_mdio_read;
+	bus->write = enetc_mdio_write;
+	bus->parent = dev;
+	mdio_priv = bus->priv;
+	mdio_priv->hw = hw;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
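+	/* issue a function-level reset before enabling the device */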
+	pcie_flr(pdev);
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(dev, "device enable failed\n");
+		return err;
+	}
+
+	err = pci_request_region(pdev, 0, KBUILD_MODNAME);
+	if (err) {
+		dev_err(dev, "pci_request_region failed\n");
+		goto err_pci_mem_reg;
+	}
+
+	hw->port = pci_iomap(pdev, 0, 0);
+	if (!hw->port) {
+		err = -ENXIO;
+		dev_err(dev, "iomap failed\n");
+		goto err_ioremap;
+	}
+
+	err = of_mdiobus_register(bus, dev->of_node);
+	if (err)
+		goto err_mdiobus_reg;
+
+	pci_set_drvdata(pdev, bus);
+
+	return 0;
+
+err_mdiobus_reg:
+	iounmap(mdio_priv->hw->port);
+err_ioremap:
+	pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void enetc_pci_mdio_remove(struct pci_dev *pdev)
+{
+	struct mii_bus *bus = pci_get_drvdata(pdev);
+	struct enetc_mdio_priv *mdio_priv;
+
+	mdiobus_unregister(bus);
+	mdio_priv = bus->priv;
+	iounmap(mdio_priv->hw->port);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_pci_mdio_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
+
+static struct pci_driver enetc_pci_mdio_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_pci_mdio_id_table,
+	.probe = enetc_pci_mdio_probe,
+	.remove = enetc_pci_mdio_remove,
+};
+module_pci_driver(enetc_pci_mdio_driver);
+
+MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
new file mode 100644
index 0000000..b73421c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include "enetc_pf.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+			  __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC PF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
+{
+	u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
+	u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
+
+	*(u32 *)addr = upper;
+	*(u16 *)(addr + 4) = lower;
+}
+
+static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
+					  const u8 *addr)
+{
+	u32 upper = *(const u32 *)addr;
+	u16 lower = *(const u16 *)(addr + 4);
+
+	__raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
+	__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
+}
+
+static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+	enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+
+	return 0;
+}
+
+static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
+{
+	u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);
+
+	val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
+	enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
+}
+
+static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
+{
+	return pf->vlan_promisc_simap & BIT(si_idx);
+}
+
+static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
+{
+	int i;
+
+	for_each_set_bit(i, pf->active_vlans, VLAN_N_VID)
+		return true;
+
+	return false;
+}
+
+static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+	pf->vlan_promisc_simap |= BIT(si_idx);
+	enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+	pf->vlan_promisc_simap &= ~BIT(si_idx);
+	enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
+{
+	u32 val = 0;
+
+	if (vlan)
+		val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;
+
+	enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
+}
+
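+/* Fold the 48-bit MAC address into a 6-bit index for the 64-entry hash
+ * filter: bit i of the result is the XOR (parity) of every 6th bit of the
+ * byte-swapped address, starting with bit i.
+ */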
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+	u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+	u64 mask = 0;
+	int res = 0;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		mask |= BIT_ULL(i * 6);
+
+	for (i = 0; i < 6; i++)
+		res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+	return res;
+}
+
+static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+	filter->mac_addr_cnt = 0;
+
+	bitmap_zero(filter->mac_hash_table,
+		    ENETC_MADDR_HASH_TBL_SZ);
+}
+
+static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
+					 const unsigned char *addr)
+{
+	/* add exact match addr */
+	ether_addr_copy(filter->mac_addr, addr);
+	filter->mac_addr_cnt++;
+}
+
+static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+					 const unsigned char *addr)
+{
+	int idx = enetc_mac_addr_hash_idx(addr);
+
+	/* add hash table entry */
+	__set_bit(idx, filter->mac_hash_table);
+	filter->mac_addr_cnt++;
+}
+
+static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
+{
+	bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+	if (type == UC) {
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
+	} else { /* MC */
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
+	}
+}
+
+static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
+				 u32 *hash)
+{
+	bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+	if (type == UC) {
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
+	} else { /* MC */
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
+	}
+}
+
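+/* Write the PF's (SI0) MAC filters to hardware: a single unicast address is
+ * installed as an exact match entry, everything else falls back to the
+ * 64-bit hash filter.
+ */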
+static void enetc_sync_mac_filters(struct enetc_pf *pf)
+{
+	struct enetc_mac_filter *f = pf->mac_filter;
+	struct enetc_si *si = pf->si;
+	int i, pos;
+
+	pos = EMETC_MAC_ADDR_FILT_RES;
+
+	for (i = 0; i < MADDR_TYPE; i++, f++) {
+		bool em = (f->mac_addr_cnt == 1) && (i == UC);
+		bool clear = !f->mac_addr_cnt;
+
+		if (clear) {
+			if (i == UC)
+				enetc_clear_mac_flt_entry(si, pos);
+
+			enetc_clear_mac_ht_flt(si, 0, i);
+			continue;
+		}
+
+		/* exact match filter */
+		if (em) {
+			int err;
+
+			enetc_clear_mac_ht_flt(si, 0, UC);
+
+			err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
+						      BIT(0));
+			if (!err)
+				continue;
+
+			/* fallback to HT filtering */
+			dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
+				 err);
+		}
+
+		/* hash table filter, clear EM filter for UC entries */
+		if (i == UC)
+			enetc_clear_mac_flt_entry(si, pos);
+
+		enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
+	}
+}
+
+static void enetc_pf_set_rx_mode(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	struct enetc_hw *hw = &priv->si->hw;
+	bool uprom = false, mprom = false;
+	struct enetc_mac_filter *filter;
+	struct netdev_hw_addr *ha;
+	u32 psipmr = 0;
+	bool em;
+
+	if (ndev->flags & IFF_PROMISC) {
+		/* enable promisc mode for SI0 (PF) */
+		psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+		uprom = true;
+		mprom = true;
+		/* enable VLAN promisc mode for SI0 */
+		if (!enetc_si_vlan_promisc_is_on(pf, 0))
+			enetc_enable_si_vlan_promisc(pf, 0);
+
+	} else if (ndev->flags & IFF_ALLMULTI) {
+		/* enable multicast promisc mode for SI0 (PF) */
+		psipmr = ENETC_PSIPMR_SET_MP(0);
+		mprom = true;
+	}
+
+	/* first 2 filter entries belong to PF */
+	if (!uprom) {
+		/* Update unicast filters */
+		filter = &pf->mac_filter[UC];
+		enetc_reset_mac_addr_filter(filter);
+
+		em = (netdev_uc_count(ndev) == 1);
+		netdev_for_each_uc_addr(ha, ndev) {
+			if (em) {
+				enetc_add_mac_addr_em_filter(filter, ha->addr);
+				break;
+			}
+
+			enetc_add_mac_addr_ht_filter(filter, ha->addr);
+		}
+	}
+
+	if (!mprom) {
+		/* Update multicast filters */
+		filter = &pf->mac_filter[MC];
+		enetc_reset_mac_addr_filter(filter);
+
+		netdev_for_each_mc_addr(ha, ndev) {
+			if (!is_multicast_ether_addr(ha->addr))
+				continue;
+
+			enetc_add_mac_addr_ht_filter(filter, ha->addr);
+		}
+	}
+
+	if (!uprom || !mprom)
+		/* update PF entries */
+		enetc_sync_mac_filters(pf);
+
+	psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
+		  ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
+	enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
+}
+
+static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
+				     u32 *hash)
+{
+	enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
+	enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
+}
+
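+/* 64-entry VLAN hash filter index: bit i of the result is VID bit i XORed
+ * with VID bit i + 6.
+ */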
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+	int res = 0;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+	return res;
+}
+
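+/* Several VIDs can map to the same hash bit, so on VID removal the bitmap is
+ * rebuilt from active_vlans (rehash) before being written back to hardware.
+ */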
+static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
+{
+	int i;
+
+	if (rehash) {
+		bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+
+		for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+			int hidx = enetc_vid_hash_idx(i);
+
+			__set_bit(hidx, pf->vlan_ht_filter);
+		}
+	}
+
+	enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
+}
+
+static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	int idx;
+
+	if (enetc_si_vlan_promisc_is_on(pf, 0))
+		enetc_disable_si_vlan_promisc(pf, 0);
+
+	__set_bit(vid, pf->active_vlans);
+
+	idx = enetc_vid_hash_idx(vid);
+	if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+		enetc_sync_vlan_ht_filter(pf, false);
+
+	return 0;
+}
+
+static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+	__clear_bit(vid, pf->active_vlans);
+	enetc_sync_vlan_ht_filter(pf, true);
+
+	if (!enetc_vlan_filter_is_on(pf))
+		enetc_enable_si_vlan_promisc(pf, 0);
+
+	return 0;
+}
+
+static void enetc_set_loopback(struct net_device *ndev, bool en)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	u32 reg;
+
+	reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+	if (reg & ENETC_PMO_IFM_RG) {
+		/* RGMII mode */
+		reg = (reg & ~ENETC_PM0_IFM_RLP) |
+		      (en ? ENETC_PM0_IFM_RLP : 0);
+		enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+	} else {
+		/* assume SGMII mode */
+		reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+		reg = (reg & ~ENETC_PM0_CMD_XGLP) |
+		      (en ? ENETC_PM0_CMD_XGLP : 0);
+		reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
+		      (en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
+		enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
+		enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+	}
+}
+
+static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	struct enetc_vf_state *vf_state;
+
+	if (vf >= pf->total_vfs)
+		return -EINVAL;
+
+	if (!is_valid_ether_addr(mac))
+		return -EADDRNOTAVAIL;
+
+	vf_state = &pf->vf_state[vf];
+	vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
+	enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
+	return 0;
+}
+
+static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
+				u8 qos, __be16 proto)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+	if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
+		return -EOPNOTSUPP;
+
+	if (vf >= pf->total_vfs)
+		return -EINVAL;
+
+	if (proto != htons(ETH_P_8021Q))
+		/* only C-tags supported for now */
+		return -EPROTONOSUPPORT;
+
+	enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
+	return 0;
+}
+
+static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	u32 cfgr;
+
+	if (vf >= pf->total_vfs)
+		return -EINVAL;
+
+	cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
+	cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
+	enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
+
+	return 0;
+}
+
+static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+{
+	unsigned char mac_addr[MAX_ADDR_LEN];
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_hw *hw = &si->hw;
+	int i;
+
+	/* check MAC addresses for PF and all VFs, if any is 0 set it to random */
+	for (i = 0; i < pf->total_vfs + 1; i++) {
+		enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
+		if (!is_zero_ether_addr(mac_addr))
+			continue;
+		eth_random_addr(mac_addr);
+		dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
+			 i, mac_addr);
+		enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+	}
+}
+
+static void enetc_port_assign_rfs_entries(struct enetc_si *si)
+{
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_hw *hw = &si->hw;
+	int num_entries, vf_entries, i;
+	u32 val;
+
+	/* split RFS entries between functions */
+	val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+	num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+	vf_entries = num_entries / (pf->total_vfs + 1);
+
+	for (i = 0; i < pf->total_vfs; i++)
+		enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
+	enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
+		      num_entries - vf_entries * pf->total_vfs);
+
+	/* enable RFS on port */
+	enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
+}
+
+static void enetc_port_si_configure(struct enetc_si *si)
+{
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_hw *hw = &si->hw;
+	int num_rings, i;
+	u32 val;
+
+	val = enetc_port_rd(hw, ENETC_PCAPR0);
+	num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
+
+	val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
+	val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
+
+	if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
+		val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
+		val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+
+		dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
+			 num_rings, ENETC_PF_NUM_RINGS);
+
+		num_rings = 0;
+	}
+
+	/* Add default one-time settings for SI0 (PF) */
+	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+	enetc_port_wr(hw, ENETC_PSICFGR0(0), val);
+
+	if (num_rings)
+		num_rings -= ENETC_PF_NUM_RINGS;
+
+	/* Configure the SIs for each available VF */
+	val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+	val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+	if (num_rings) {
+		num_rings /= pf->total_vfs;
+		val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
+		val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+	}
+
+	for (i = 0; i < pf->total_vfs; i++)
+		enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);
+
+	/* Port level VLAN settings */
+	val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+	enetc_port_wr(hw, ENETC_PVCLCTR, val);
+	/* use outer tag for VLAN filtering */
+	enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+	enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+		      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+	enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+	enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+	enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+		      ENETC_PM0_CMD_TXP	| ENETC_PM0_PROMISC |
+		      ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+
+	enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+		      ENETC_PM0_CMD_TXP	| ENETC_PM0_PROMISC |
+		      ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+	/* set auto-speed for RGMII */
+	if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG)
+		enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
+	if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII)
+		enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
+}
+
+static void enetc_configure_port_pmac(struct enetc_hw *hw)
+{
+	u32 temp;
+
+	/* Set pMAC step lock */
+	temp = enetc_port_rd(hw, ENETC_PFPMR);
+	enetc_port_wr(hw, ENETC_PFPMR,
+		      temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
+
+	temp = enetc_port_rd(hw, ENETC_MMCSR);
+	enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+}
+
+static void enetc_configure_port(struct enetc_pf *pf)
+{
+	u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
+	struct enetc_hw *hw = &pf->si->hw;
+
+	enetc_configure_port_pmac(hw);
+
+	enetc_configure_port_mac(hw);
+
+	enetc_port_si_configure(pf->si);
+
+	/* set up hash key */
+	get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+	enetc_set_rss_key(hw, hash_key);
+
+	/* split up RFS entries */
+	enetc_port_assign_rfs_entries(pf->si);
+
+	/* fix-up primary MAC addresses, if not set already */
+	enetc_port_setup_primary_mac_address(pf->si);
+
+	/* enforce VLAN promisc mode for all SIs */
+	pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
+	enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
+
+	enetc_port_wr(hw, ENETC_PSIPMR, 0);
+
+	/* enable port */
+	enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
+}
+
+/* Messaging */
+static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf,
+						int vf_id)
+{
+	struct enetc_vf_state *vf_state = &pf->vf_state[vf_id];
+	struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+	struct enetc_msg_cmd_set_primary_mac *cmd;
+	struct device *dev = &pf->si->pdev->dev;
+	u16 cmd_id;
+	char *addr;
+
+	cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
+	cmd_id = cmd->header.id;
+	if (cmd_id != ENETC_MSG_CMD_MNG_ADD)
+		return ENETC_MSG_CMD_STATUS_FAIL;
+
+	addr = cmd->mac.sa_data;
+	if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC)
+		dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n",
+			 vf_id);
+	else
+		enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr);
+
+	return ENETC_MSG_CMD_STATUS_OK;
+}
+
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status)
+{
+	struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+	struct device *dev = &pf->si->pdev->dev;
+	struct enetc_msg_cmd_header *cmd_hdr;
+	u16 cmd_type;
+
+	*status = ENETC_MSG_CMD_STATUS_OK;
+	cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
+	cmd_type = cmd_hdr->type;
+
+	switch (cmd_type) {
+	case ENETC_MSG_CMD_MNG_MAC:
+		*status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id);
+		break;
+	default:
+		dev_err(dev, "command not supported (cmd_type: 0x%x)\n",
+			cmd_type);
+	}
+}
+
+#ifdef CONFIG_PCI_IOV
+static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_pf *pf = enetc_si_priv(si);
+	int err;
+
+	if (!num_vfs) {
+		enetc_msg_psi_free(pf);
+		kfree(pf->vf_state);
+		pf->num_vfs = 0;
+		pci_disable_sriov(pdev);
+	} else {
+		pf->num_vfs = num_vfs;
+
+		pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
+				       GFP_KERNEL);
+		if (!pf->vf_state) {
+			pf->num_vfs = 0;
+			return -ENOMEM;
+		}
+
+		err = enetc_msg_psi_init(pf);
+		if (err) {
+			dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
+			goto err_msg_psi;
+		}
+
+		err = pci_enable_sriov(pdev, num_vfs);
+		if (err) {
+			dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
+			goto err_en_sriov;
+		}
+	}
+
+	return num_vfs;
+
+err_en_sriov:
+	enetc_msg_psi_free(pf);
+err_msg_psi:
+	kfree(pf->vf_state);
+	pf->num_vfs = 0;
+
+	return err;
+}
+#else
+#define enetc_sriov_configure(pdev, num_vfs)	(void)0
+#endif
+
+static int enetc_pf_set_features(struct net_device *ndev,
+				 netdev_features_t features)
+{
+	netdev_features_t changed = ndev->features ^ features;
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		enetc_enable_rxvlan(&priv->si->hw, 0,
+				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+		enetc_enable_txvlan(&priv->si->hw, 0,
+				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
+	if (changed & NETIF_F_LOOPBACK)
+		enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+	return enetc_set_features(ndev, features);
+}
+
+static const struct net_device_ops enetc_ndev_ops = {
+	.ndo_open		= enetc_open,
+	.ndo_stop		= enetc_close,
+	.ndo_start_xmit		= enetc_xmit,
+	.ndo_get_stats		= enetc_get_stats,
+	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
+	.ndo_set_rx_mode	= enetc_pf_set_rx_mode,
+	.ndo_vlan_rx_add_vid	= enetc_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= enetc_vlan_rx_del_vid,
+	.ndo_set_vf_mac		= enetc_pf_set_vf_mac,
+	.ndo_set_vf_vlan	= enetc_pf_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= enetc_pf_set_vf_spoofchk,
+	.ndo_set_features	= enetc_pf_set_features,
+	.ndo_do_ioctl		= enetc_ioctl,
+	.ndo_setup_tc		= enetc_setup_tc,
+};
+
+static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+				  const struct net_device_ops *ndev_ops)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	SET_NETDEV_DEV(ndev, &si->pdev->dev);
+	priv->ndev = ndev;
+	priv->si = si;
+	priv->dev = &si->pdev->dev;
+	si->ndev = ndev;
+
+	priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+	ndev->netdev_ops = ndev_ops;
+	enetc_set_ethtool_ops(ndev);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->max_mtu = ENETC_MAX_MTU;
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_LOOPBACK;
+	ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+			 NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX |
+			 NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (si->num_rss)
+		ndev->hw_features |= NETIF_F_RXHASH;
+
+	if (si->errata & ENETC_ERR_TXCSUM) {
+		ndev->hw_features &= ~NETIF_F_HW_CSUM;
+		ndev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* pick up primary MAC address from SI */
+	enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
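+/* Parse the port's DT node: use the phy-handle if present, otherwise fall
+ * back to a fixed link.  An "mdio" child node means this PF also exposes an
+ * MDIO bus, registered here via enetc_mdio_probe().
+ */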
+static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
+{
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	struct device_node *np = priv->dev->of_node;
+	struct device_node *mdio_np;
+	int err;
+
+	if (!np) {
+		dev_err(priv->dev, "missing ENETC port node\n");
+		return -ENODEV;
+	}
+
+	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+	if (!priv->phy_node) {
+		if (!of_phy_is_fixed_link(np)) {
+			dev_err(priv->dev, "PHY not specified\n");
+			return -ENODEV;
+		}
+
+		err = of_phy_register_fixed_link(np);
+		if (err < 0) {
+			dev_err(priv->dev, "fixed link registration failed\n");
+			return err;
+		}
+
+		priv->phy_node = of_node_get(np);
+	}
+
+	mdio_np = of_get_child_by_name(np, "mdio");
+	if (mdio_np) {
+		of_node_put(mdio_np);
+		err = enetc_mdio_probe(pf);
+		if (err) {
+			of_node_put(priv->phy_node);
+			return err;
+		}
+	}
+
+	priv->if_mode = of_get_phy_mode(np);
+	if ((int)priv->if_mode < 0) {
+		dev_err(priv->dev, "missing phy type\n");
+		of_node_put(priv->phy_node);
+		if (of_phy_is_fixed_link(np))
+			of_phy_deregister_fixed_link(np);
+		else
+			enetc_mdio_remove(pf);
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void enetc_of_put_phy(struct enetc_ndev_priv *priv)
+{
+	struct device_node *np = priv->dev->of_node;
+
+	if (np && of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	if (priv->phy_node)
+		of_node_put(priv->phy_node);
+}
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *ent)
+{
+	struct enetc_ndev_priv *priv;
+	struct net_device *ndev;
+	struct enetc_si *si;
+	struct enetc_pf *pf;
+	int err;
+
+	if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+		dev_info(&pdev->dev, "device is disabled, skipping\n");
+		return -ENODEV;
+	}
+
+	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+	if (err) {
+		dev_err(&pdev->dev, "PCI probing failed\n");
+		return err;
+	}
+
+	si = pci_get_drvdata(pdev);
+	if (!si->hw.port || !si->hw.global) {
+		err = -ENODEV;
+		dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
+		goto err_map_pf_space;
+	}
+
+	pf = enetc_si_priv(si);
+	pf->si = si;
+	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+
+	enetc_configure_port(pf);
+
+	enetc_get_si_caps(si);
+
+	ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+	if (!ndev) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "netdev creation failed\n");
+		goto err_alloc_netdev;
+	}
+
+	enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+	priv = netdev_priv(ndev);
+
+	enetc_init_si_rings_params(priv);
+
+	err = enetc_alloc_si_resources(priv);
+	if (err) {
+		dev_err(&pdev->dev, "SI resource alloc failed\n");
+		goto err_alloc_si_res;
+	}
+
+	err = enetc_alloc_msix(priv);
+	if (err) {
+		dev_err(&pdev->dev, "MSIX alloc failed\n");
+		goto err_alloc_msix;
+	}
+
+	err = enetc_of_get_phy(priv);
+	if (err)
+		dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
+
+	err = register_netdev(ndev);
+	if (err)
+		goto err_reg_netdev;
+
+	netif_carrier_off(ndev);
+
+	netif_info(priv, probe, ndev, "%s v%s\n",
+		   enetc_drv_name, enetc_drv_ver);
+
+	return 0;
+
+err_reg_netdev:
+	enetc_of_put_phy(priv);
+	enetc_free_msix(priv);
+err_alloc_msix:
+	enetc_free_si_resources(priv);
+err_alloc_si_res:
+	si->ndev = NULL;
+	free_netdev(ndev);
+err_alloc_netdev:
+err_map_pf_space:
+	enetc_pci_remove(pdev);
+
+	return err;
+}
+
+static void enetc_pf_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_ndev_priv *priv;
+
+	if (pf->num_vfs)
+		enetc_sriov_configure(pdev, 0);
+
+	priv = netdev_priv(si->ndev);
+	netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+		   enetc_drv_name, enetc_drv_ver);
+
+	unregister_netdev(si->ndev);
+
+	enetc_mdio_remove(pf);
+	enetc_of_put_phy(priv);
+
+	enetc_free_msix(priv);
+
+	enetc_free_si_resources(priv);
+
+	free_netdev(si->ndev);
+
+	enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_pf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);
+
+static struct pci_driver enetc_pf_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_pf_id_table,
+	.probe = enetc_pf_probe,
+	.remove = enetc_pf_remove,
+#ifdef CONFIG_PCI_IOV
+	.sriov_configure = enetc_sriov_configure,
+#endif
+};
+module_pci_driver(enetc_pf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
new file mode 100644
index 0000000..10dd1b5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+#define ENETC_PF_NUM_RINGS	8
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+#define ENETC_MAX_NUM_MAC_FLT	((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
+
+#define ENETC_MADDR_HASH_TBL_SZ	64
+struct enetc_mac_filter {
+	union {
+		char mac_addr[ETH_ALEN];
+		DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+	};
+	int mac_addr_cnt;
+};
+
+#define ENETC_VLAN_HT_SIZE	64
+
+enum enetc_vf_flags {
+	ENETC_VF_FLAG_PF_SET_MAC	= BIT(0),
+};
+
+struct enetc_vf_state {
+	enum enetc_vf_flags flags;
+};
+
+struct enetc_pf {
+	struct enetc_si *si;
+	int num_vfs; /* number of active VFs, after sriov_init */
+	int total_vfs; /* max number of VFs, set for PF at probe */
+	struct enetc_vf_state *vf_state;
+
+	struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT];
+
+	struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS];
+	struct work_struct msg_task;
+	char msg_int_name[ENETC_INT_NAME_MAX];
+
+	char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */
+	DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
+
+	struct mii_bus *mdio; /* saved for cleanup */
+};
+
+int enetc_msg_psi_init(struct enetc_pf *pf);
+void enetc_msg_psi_free(struct enetc_pf *pf);
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
+
+/* MDIO */
+int enetc_mdio_probe(struct enetc_pf *pf);
+void enetc_mdio_remove(struct enetc_pf *pf);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
new file mode 100644
index 0000000..bc59489
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "enetc.h"
+
+int enetc_phc_index = -1;
+EXPORT_SYMBOL(enetc_phc_index);
+
+static struct ptp_clock_info enetc_ptp_caps = {
+	.owner		= THIS_MODULE,
+	.name		= "ENETC PTP clock",
+	.max_adj	= 512000,
+	.n_alarm	= 0,
+	.n_ext_ts	= 2,
+	.n_per_out	= 0,
+	.n_pins		= 0,
+	.pps		= 1,
+	.adjfine	= ptp_qoriq_adjfine,
+	.adjtime	= ptp_qoriq_adjtime,
+	.gettime64	= ptp_qoriq_gettime,
+	.settime64	= ptp_qoriq_settime,
+	.enable		= ptp_qoriq_enable,
+};
+
+static int enetc_ptp_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *ent)
+{
+	struct ptp_qoriq *ptp_qoriq;
+	void __iomem *base;
+	int err, len, n;
+
+	if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+		dev_info(&pdev->dev, "device is disabled, skipping\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "device enable failed\n");
+		return err;
+	}
+
+	/* set up for high or low dma */
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"DMA configuration failed: 0x%x\n", err);
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+		goto err_pci_mem_reg;
+	}
+
+	pci_set_master(pdev);
+
+	ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL);
+	if (!ptp_qoriq) {
+		err = -ENOMEM;
+		goto err_alloc_ptp;
+	}
+
+	len = pci_resource_len(pdev, ENETC_BAR_REGS);
+
+	base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+	if (!base) {
+		err = -ENXIO;
+		dev_err(&pdev->dev, "ioremap() failed\n");
+		goto err_ioremap;
+	}
+
+	/* Allocate 1 interrupt */
+	n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+	if (n != 1) {
+		err = -EPERM;
+		goto err_irq_vectors;
+	}
+
+	ptp_qoriq->irq = pci_irq_vector(pdev, 0);
+
+	err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq);
+	if (err) {
+		dev_err(&pdev->dev, "request_irq() failed!\n");
+		goto err_irq;
+	}
+
+	ptp_qoriq->dev = &pdev->dev;
+
+	err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps);
+	if (err)
+		goto err_no_clock;
+
+	enetc_phc_index = ptp_qoriq->phc_index;
+	pci_set_drvdata(pdev, ptp_qoriq);
+
+	return 0;
+
+err_no_clock:
+	free_irq(ptp_qoriq->irq, ptp_qoriq);
+err_irq:
+	pci_free_irq_vectors(pdev);
+err_irq_vectors:
+	iounmap(base);
+err_ioremap:
+	kfree(ptp_qoriq);
+err_alloc_ptp:
+	pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void enetc_ptp_remove(struct pci_dev *pdev)
+{
+	struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
+
+	enetc_phc_index = -1;
+	ptp_qoriq_free(ptp_qoriq);
+	pci_free_irq_vectors(pdev);
+	kfree(ptp_qoriq);
+
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_ptp_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table);
+
+static struct pci_driver enetc_ptp_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_ptp_id_table,
+	.probe = enetc_ptp_probe,
+	.remove = enetc_ptp_remove,
+};
+module_pci_driver(enetc_ptp_driver);
+
+MODULE_DESCRIPTION("ENETC PTP clock driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
new file mode 100644
index 0000000..ebd21bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include "enetc.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+			  __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC VF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+/* Messaging */
+static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
+				    struct enetc_msg_swbd *msg)
+{
+	u32 val;
+
+	val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma);
+	enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma));
+	enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
+}
+
+static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
+{
+	int timeout = 100;
+	u32 vsimsgsr;
+
+	enetc_msg_vsi_write_msg(&si->hw, msg);
+
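+	/* poll until the hardware clears the message-in-progress bit (MB);
+	 * 100 iterations of usleep_range(1000, 2000) bound the wait to
+	 * roughly 100-200 ms
+	 */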
+	do {
+		vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
+		if (!(vsimsgsr & ENETC_VSIMSGSR_MB))
+			break;
+
+		usleep_range(1000, 2000);
+	} while (--timeout);
+
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	/* check for message delivery error */
+	if (vsimsgsr & ENETC_VSIMSGSR_MS) {
+		dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
+			ENETC_SIMSGSR_GET_MC(vsimsgsr));
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
+					      struct sockaddr *saddr)
+{
+	struct enetc_msg_cmd_set_primary_mac *cmd;
+	struct enetc_msg_swbd msg;
+	int err;
+
+	msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
+	msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
+				       GFP_KERNEL);
+	if (!msg.vaddr) {
+		dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n",
+			msg.size);
+		return -ENOMEM;
+	}
+
+	cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
+	cmd->header.type = ENETC_MSG_CMD_MNG_MAC;
+	cmd->header.id = ENETC_MSG_CMD_MNG_ADD;
+	memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
+
+	/* send the command and wait */
+	err = enetc_msg_vsi_send(priv->si, &msg);
+
+	dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
+
+	return err;
+}
+
+static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct sockaddr *saddr = addr;
+	int err;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int enetc_vf_set_features(struct net_device *ndev,
+				 netdev_features_t features)
+{
+	return enetc_set_features(ndev, features);
+}
+
+/* Probing / Init */
+static const struct net_device_ops enetc_ndev_ops = {
+	.ndo_open		= enetc_open,
+	.ndo_stop		= enetc_close,
+	.ndo_start_xmit		= enetc_xmit,
+	.ndo_get_stats		= enetc_get_stats,
+	.ndo_set_mac_address	= enetc_vf_set_mac_addr,
+	.ndo_set_features	= enetc_vf_set_features,
+	.ndo_do_ioctl		= enetc_ioctl,
+	.ndo_setup_tc		= enetc_setup_tc,
+};
+
+static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+				  const struct net_device_ops *ndev_ops)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	SET_NETDEV_DEV(ndev, &si->pdev->dev);
+	priv->ndev = ndev;
+	priv->si = si;
+	priv->dev = &si->pdev->dev;
+	si->ndev = ndev;
+
+	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+	ndev->netdev_ops = ndev_ops;
+	enetc_set_ethtool_ops(ndev);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->max_mtu = ENETC_MAX_MTU;
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			    NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_CTAG_RX;
+	ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+			 NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (si->num_rss)
+		ndev->hw_features |= NETIF_F_RXHASH;
+
+	if (si->errata & ENETC_ERR_TXCSUM) {
+		ndev->hw_features &= ~NETIF_F_HW_CSUM;
+		ndev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	/* pick up primary MAC address from SI */
+	enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_vf_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *ent)
+{
+	struct enetc_ndev_priv *priv;
+	struct net_device *ndev;
+	struct enetc_si *si;
+	int err;
+
+	err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
+	if (err) {
+		dev_err(&pdev->dev, "PCI probing failed\n");
+		return err;
+	}
+
+	si = pci_get_drvdata(pdev);
+
+	enetc_get_si_caps(si);
+
+	ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+	if (!ndev) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "netdev creation failed\n");
+		goto err_alloc_netdev;
+	}
+
+	enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+	priv = netdev_priv(ndev);
+
+	enetc_init_si_rings_params(priv);
+
+	err = enetc_alloc_si_resources(priv);
+	if (err) {
+		dev_err(&pdev->dev, "SI resource alloc failed\n");
+		goto err_alloc_si_res;
+	}
+
+	err = enetc_alloc_msix(priv);
+	if (err) {
+		dev_err(&pdev->dev, "MSIX alloc failed\n");
+		goto err_alloc_msix;
+	}
+
+	err = register_netdev(ndev);
+	if (err)
+		goto err_reg_netdev;
+
+	netif_carrier_off(ndev);
+
+	netif_info(priv, probe, ndev, "%s v%s\n",
+		   enetc_drv_name, enetc_drv_ver);
+
+	return 0;
+
+err_reg_netdev:
+	enetc_free_msix(priv);
+err_alloc_msix:
+	enetc_free_si_resources(priv);
+err_alloc_si_res:
+	si->ndev = NULL;
+	free_netdev(ndev);
+err_alloc_netdev:
+	enetc_pci_remove(pdev);
+
+	return err;
+}
+
+static void enetc_vf_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_ndev_priv *priv;
+
+	priv = netdev_priv(si->ndev);
+	netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+		   enetc_drv_name, enetc_drv_ver);
+	unregister_netdev(si->ndev);
+
+	enetc_free_msix(priv);
+
+	enetc_free_si_resources(priv);
+
+	free_netdev(si->ndev);
+
+	enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_vf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
+
+static struct pci_driver enetc_vf_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_vf_id_table,
+	.probe = enetc_vf_probe,
+	.remove = enetc_vf_remove,
+};
+module_pci_driver(enetc_vf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index bf80855..f79e57f 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -531,7 +531,6 @@
 
 	/* Phylib and MDIO interface */
 	struct	mii_bus *mii_bus;
-	int	mii_timeout;
 	uint	phy_speed;
 	phy_interface_t	phy_interface;
 	struct device_node *phy_node;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7b98bb7..4bb3076 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -208,8 +208,11 @@
 
 /* FEC MII MMFR bits definition */
 #define FEC_MMFR_ST		(1 << 30)
+#define FEC_MMFR_ST_C45		(0)
 #define FEC_MMFR_OP_READ	(2 << 28)
+#define FEC_MMFR_OP_READ_C45	(3 << 28)
 #define FEC_MMFR_OP_WRITE	(1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE	(0)
 #define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
 #define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
 #define FEC_MMFR_TA		(2 << 16)
@@ -365,7 +368,7 @@
 		status = fec16_to_cpu(bdp->cbd_sc);
 		status &= ~BD_ENET_TX_STATS;
 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-		frag_len = skb_shinfo(skb)->frags[frag].size;
+		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
 
 		/* Handle the last BD specially */
 		if (frag == nr_frags - 1) {
@@ -387,7 +390,7 @@
 			ebdp->cbd_esc = cpu_to_fec32(estatus);
 		}
 
-		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+		bufaddr = skb_frag_address(this_frag);
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 		if (((unsigned long) bufaddr) & fep->tx_align ||
@@ -1655,7 +1658,7 @@
 		struct device_node *np = fep->pdev->dev.of_node;
 		if (np) {
 			const char *mac = of_get_mac_address(np);
-			if (mac)
+			if (!IS_ERR(mac))
 				iap = (unsigned char *) mac;
 		}
 	}
@@ -1689,10 +1692,10 @@
 	 */
 	if (!is_valid_ether_addr(iap)) {
 		/* Report it and use a random ethernet address instead */
-		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
 		eth_hw_addr_random(ndev);
-		netdev_info(ndev, "Using random MAC address: %pM\n",
-			    ndev->dev_addr);
+		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+			 ndev->dev_addr);
 		return;
 	}
 
@@ -1714,12 +1717,6 @@
 	struct phy_device *phy_dev = ndev->phydev;
 	int status_change = 0;
 
-	/* Prevent a state halted on mii error */
-	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
-		phy_dev->state = PHY_RESUMING;
-		return;
-	}
-
 	/*
 	 * If the netdev is down, or is going down, we're not interested
 	 * in link state events, so just mark our idea of the link as down
@@ -1773,25 +1770,52 @@
 	struct fec_enet_private *fep = bus->priv;
 	struct device *dev = &fep->pdev->dev;
 	unsigned long time_left;
-	int ret = 0;
+	int ret = 0, frame_start, frame_addr, frame_op;
+	bool is_c45 = !!(regnum & MII_ADDR_C45);
 
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
 		return ret;
 
-	fep->mii_timeout = 0;
 	reinit_completion(&fep->mdio_done);
 
+	if (is_c45) {
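+		/* Clause 45: regnum carries the MMD device address in bits
+		 * 16-20 and the register address in the low 16 bits.  Latch
+		 * the register address with an address-write frame first,
+		 * then issue the read frame addressed to the MMD.
+		 */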
+		frame_start = FEC_MMFR_ST_C45;
+
+		/* write address */
+		frame_addr = (regnum >> 16);
+		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+		       FEC_MMFR_TA | (regnum & 0xFFFF),
+		       fep->hwp + FEC_MII_DATA);
+
+		/* wait for end of transfer */
+		time_left = wait_for_completion_timeout(&fep->mdio_done,
+				usecs_to_jiffies(FEC_MII_TIMEOUT));
+		if (time_left == 0) {
+			netdev_err(fep->netdev, "MDIO address write timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		frame_op = FEC_MMFR_OP_READ_C45;
+
+	} else {
+		/* C22 read */
+		frame_op = FEC_MMFR_OP_READ;
+		frame_start = FEC_MMFR_ST;
+		frame_addr = regnum;
+	}
+
 	/* start a read op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
-		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+	writel(frame_start | frame_op |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
 		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
 
 	/* wait for end of transfer */
 	time_left = wait_for_completion_timeout(&fep->mdio_done,
 			usecs_to_jiffies(FEC_MII_TIMEOUT));
 	if (time_left == 0) {
-		fep->mii_timeout = 1;
 		netdev_err(fep->netdev, "MDIO read timeout\n");
 		ret = -ETIMEDOUT;
 		goto out;
@@ -1812,7 +1836,8 @@
 	struct fec_enet_private *fep = bus->priv;
 	struct device *dev = &fep->pdev->dev;
 	unsigned long time_left;
-	int ret;
+	int ret, frame_start, frame_addr;
+	bool is_c45 = !!(regnum & MII_ADDR_C45);
 
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
@@ -1820,12 +1845,35 @@
 	else
 		ret = 0;
 
-	fep->mii_timeout = 0;
 	reinit_completion(&fep->mdio_done);
 
+	if (is_c45) {
+		frame_start = FEC_MMFR_ST_C45;
+
+		/* write address */
+		frame_addr = (regnum >> 16);
+		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+		       FEC_MMFR_TA | (regnum & 0xFFFF),
+		       fep->hwp + FEC_MII_DATA);
+
+		/* wait for end of transfer */
+		time_left = wait_for_completion_timeout(&fep->mdio_done,
+			usecs_to_jiffies(FEC_MII_TIMEOUT));
+		if (time_left == 0) {
+			netdev_err(fep->netdev, "MDIO address write timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+	} else {
+		/* C22 write */
+		frame_start = FEC_MMFR_ST;
+		frame_addr = regnum;
+	}
+
 	/* start a write op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
-		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+	writel(frame_start | FEC_MMFR_OP_WRITE |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
 		fep->hwp + FEC_MII_DATA);
 
@@ -1833,11 +1881,11 @@
 	time_left = wait_for_completion_timeout(&fep->mdio_done,
 			usecs_to_jiffies(FEC_MII_TIMEOUT));
 	if (time_left == 0) {
-		fep->mii_timeout = 1;
 		netdev_err(fep->netdev, "MDIO write timeout\n");
 		ret  = -ETIMEDOUT;
 	}
 
+out:
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_put_autosuspend(dev);
 
@@ -1850,13 +1898,9 @@
 	int ret;
 
 	if (enable) {
-		ret = clk_prepare_enable(fep->clk_ahb);
-		if (ret)
-			return ret;
-
 		ret = clk_prepare_enable(fep->clk_enet_out);
 		if (ret)
-			goto failed_clk_enet_out;
+			return ret;
 
 		if (fep->clk_ptp) {
 			mutex_lock(&fep->ptp_clk_mutex);
@@ -1876,7 +1920,6 @@
 
 		phy_reset_after_clk_enable(ndev->phydev);
 	} else {
-		clk_disable_unprepare(fep->clk_ahb);
 		clk_disable_unprepare(fep->clk_enet_out);
 		if (fep->clk_ptp) {
 			mutex_lock(&fep->ptp_clk_mutex);
@@ -1895,8 +1938,6 @@
 failed_clk_ptp:
 	if (fep->clk_enet_out)
 		clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-		clk_disable_unprepare(fep->clk_ahb);
 
 	return ret;
 }
@@ -1948,16 +1989,15 @@
 
 	/* mask with MAC supported features */
 	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
-		phy_dev->supported &= PHY_GBIT_FEATURES;
-		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
+		phy_set_max_speed(phy_dev, 1000);
+		phy_remove_link_mode(phy_dev,
+				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
 #if !defined(CONFIG_M5272)
-		phy_dev->supported |= SUPPORTED_Pause;
+		phy_support_sym_pause(phy_dev);
 #endif
 	}
 	else
-		phy_dev->supported &= PHY_BASIC_FEATURES;
-
-	phy_dev->advertising = phy_dev->supported;
+		phy_set_max_speed(phy_dev, 100);
 
 	fep->link = 0;
 	fep->full_duplex = 0;
@@ -2002,8 +2042,6 @@
 		return -ENOENT;
 	}
 
-	fep->mii_timeout = 0;
-
 	/*
 	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
 	 *
@@ -2057,8 +2095,7 @@
 
 	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
 	err = of_mdiobus_register(fep->mii_bus, node);
-	if (node)
-		of_node_put(node);
+	of_node_put(node);
 	if (err)
 		goto err_out_free_mdiobus;
 
@@ -2112,6 +2149,7 @@
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+static __u32 fec_enet_register_version = 2;
 static u32 fec_enet_register_offset[] = {
 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2142,6 +2180,7 @@
 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
 };
 #else
+static __u32 fec_enet_register_version = 1;
 static u32 fec_enet_register_offset[] = {
 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
@@ -2163,6 +2202,8 @@
 	u32 *buf = (u32 *)regbuf;
 	u32 i, off;
 
+	regs->version = fec_enet_register_version;
+
 	memset(buf, 0, regs->len);
 
 	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
@@ -2238,13 +2279,8 @@
 	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
 
-	if (pause->rx_pause || pause->autoneg) {
-		ndev->phydev->supported |= ADVERTISED_Pause;
-		ndev->phydev->advertising |= ADVERTISED_Pause;
-	} else {
-		ndev->phydev->supported &= ~ADVERTISED_Pause;
-		ndev->phydev->advertising &= ~ADVERTISED_Pause;
-	}
+	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
+			  pause->autoneg);
 
 	if (pause->autoneg) {
 		if (netif_running(ndev))
@@ -2468,30 +2504,31 @@
 fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct device *dev = &fep->pdev->dev;
 	unsigned int cycle;
 
 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
 	if (ec->rx_max_coalesced_frames > 255) {
-		pr_err("Rx coalesced frames exceed hardware limitation\n");
+		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
 	if (ec->tx_max_coalesced_frames > 255) {
-		pr_err("Tx coalesced frame exceed hardware limitation\n");
+		dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
 	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
 	if (cycle > 0xFFFF) {
-		pr_err("Rx coalesced usec exceed hardware limitation\n");
+		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
 	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
 	if (cycle > 0xFFFF) {
-		pr_err("Rx coalesced usec exceed hardware limitation\n");
+		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
@@ -3165,8 +3202,6 @@
 		return -ENOMEM;
 	}
 
-	memset(cbd_base, 0, bd_size);
-
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
 	/* make sure MAC we just acquired is programmed into the hw */
@@ -3361,7 +3396,6 @@
 	struct fec_platform_data *pdata;
 	struct net_device *ndev;
 	int i, irq, ret = 0;
-	struct resource *r;
 	const struct of_device_id *of_id;
 	static int dev_id;
 	struct device_node *np = pdev->dev.of_node, *phy_node;
@@ -3401,8 +3435,7 @@
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(fep->hwp)) {
 		ret = PTR_ERR(fep->hwp);
 		goto failed_ioremap;
@@ -3485,14 +3518,16 @@
 	ret = clk_prepare_enable(fep->clk_ipg);
 	if (ret)
 		goto failed_clk_ipg;
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk_ahb;
 
-	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
 		ret = regulator_enable(fep->reg_phy);
 		if (ret) {
 			dev_err(&pdev->dev,
 				"Failed to enable phy regulator: %d\n", ret);
-			clk_disable_unprepare(fep->clk_ipg);
 			goto failed_regulator;
 		}
 	} else {
@@ -3523,7 +3558,7 @@
 
 	for (i = 0; i < irq_cnt; i++) {
 		snprintf(irq_name, sizeof(irq_name), "int%d", i);
-		irq = platform_get_irq_byname(pdev, irq_name);
+		irq = platform_get_irq_byname_optional(pdev, irq_name);
 		if (irq < 0)
 			irq = platform_get_irq(pdev, i);
 		if (irq < 0) {
@@ -3575,9 +3610,12 @@
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
 failed_reset:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 failed_regulator:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+	clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
 	fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3598,6 +3636,11 @@
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		return ret;
 
 	cancel_work_sync(&fep->tx_timeout_work);
 	fec_ptp_stop(pdev);
@@ -3605,13 +3648,17 @@
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
+
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
 	free_netdev(ndev);
 
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	return 0;
 }
 
@@ -3701,6 +3748,7 @@
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
+	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
 
 	return 0;
@@ -3710,8 +3758,20 @@
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
 
-	return clk_prepare_enable(fep->clk_ipg);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	return 0;
+
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+	return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 6d7269d..30cdb24 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -305,7 +305,8 @@
  * invariant will hold if you make sure that the netif_*_queue()
  * calls are done at the proper times.
  */
-static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
@@ -368,7 +369,7 @@
 		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
 				 DMA_TO_DEVICE);
 
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 	}
 	spin_unlock(&priv->lock);
 
@@ -901,8 +902,8 @@
 	 * First try to read MAC address from DT
 	 */
 	mac_addr = of_get_mac_address(np);
-	if (mac_addr) {
-		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+	if (!IS_ERR(mac_addr)) {
+		ether_addr_copy(ndev->dev_addr, mac_addr);
 	} else {
 		struct mpc52xx_fec __iomem *fec = priv->fec;
 
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7e892b1..945643c 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -600,9 +600,9 @@
 
 	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
-	irq = platform_get_irq_byname(pdev, "pps");
+	irq = platform_get_irq_byname_optional(pdev, "pps");
 	if (irq < 0)
-		irq = platform_get_irq(pdev, irq_idx);
+		irq = platform_get_irq_optional(pdev, irq_idx);
 	/* Failure to get an irq is not fatal,
 	 * only the PTP_CLOCK_PPS clock events should stop
 	 */
@@ -617,7 +617,7 @@
 	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
 	if (IS_ERR(fep->ptp_clock)) {
 		fep->ptp_clock = NULL;
-		pr_err("ptp_clock_register failed\n");
+		dev_err(&pdev->dev, "ptp_clock_register failed\n");
 	}
 
 	schedule_delayed_work(&fep->time_keep, HZ);
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index dc0850b..0139cb9 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config FSL_FMAN
 	tristate "FMan support"
 	depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index e80fedb..210749b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2439,9 +2439,6 @@
  * buffers when not using jumbo frames.
  * Must be large enough to accommodate the network MTU, but small enough
  * to avoid wasting skb memory.
- *
- * Could be overridden once, at boot-time, via the
- * fm_set_max_frm() callback.
  */
 static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
 module_param(fsl_fm_max_frm, int, 0);
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index f54da3c..e1bdfed 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -144,7 +144,8 @@
 /* Hash Key extraction fields: */
 #define DEFAULT_HASH_KEY_EXTRACT_FIELDS		\
 	(KG_SCH_KN_IPSRC1 | KG_SCH_KN_IPDST1 | \
-	    KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST)
+	 KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST | \
+	 KG_SCH_KN_IPSEC_SPI)
 
 /* Default values to be used as hash key in case IPv4 or L4 (TCP, UDP)
  * don't exist in the frame
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bc6eb30..41c6fa2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -928,7 +928,7 @@
 	hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 4070593..f75b9c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -553,7 +553,7 @@
 	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index a847b9c..7ab8095 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,11 +393,7 @@
 	 */
 
 	/* get local capabilities */
-	lcl_adv = 0;
-	if (phy_dev->advertising & ADVERTISED_Pause)
-		lcl_adv |= ADVERTISE_PAUSE_CAP;
-	if (phy_dev->advertising & ADVERTISED_Asym_Pause)
-		lcl_adv |= ADVERTISE_PAUSE_ASYM;
+	lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
 
 	/* get link partner capabilities */
 	rmt_adv = 0;
@@ -728,12 +724,12 @@
 
 	/* Get the MAC address */
 	mac_addr = of_get_mac_address(mac_node);
-	if (!mac_addr) {
+	if (IS_ERR(mac_addr)) {
 		dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
 		err = -EINVAL;
 		goto _return_of_get_parent;
 	}
-	memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+	ether_addr_copy(mac_dev->addr, mac_addr);
 
 	/* Get the port handles */
 	nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -859,9 +855,7 @@
 	if (err < 0)
 		dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
 
-	dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
-		 mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
-		 mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+	dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
 
 	priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
 	if (IS_ERR(priv->eth_dev)) {
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index be92229..245d9a6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config FS_ENET
        tristate "Freescale Ethernet Driver"
        depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2c2976a..3981c06 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -481,7 +481,8 @@
 }
 #endif
 
-static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
 	cbd_t __iomem *bdp;
@@ -500,7 +501,7 @@
 		nr_frags = skb_shinfo(skb)->nr_frags;
 		frag = skb_shinfo(skb)->frags;
 		for (i = 0; i < nr_frags; i++, frag++) {
-			if (!IS_ALIGNED(frag->page_offset, 4)) {
+			if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
 				is_aligned = 0;
 				break;
 			}
@@ -1013,8 +1014,8 @@
 	spin_lock_init(&fep->tx_lock);
 
 	mac_addr = of_get_mac_address(ofdev->dev.of_node);
-	if (mac_addr)
-		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(ndev->dev_addr, mac_addr);
 
 	ret = fep->ops->allocate_bd(ndev);
 	if (ret)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index ac2c3f6..c6481bd 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
  * Provides Bus interface for MIIM regs
@@ -8,12 +9,6 @@
  * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
  *
  * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
  */
 
 #include <linux/kernel.h>
@@ -446,8 +441,8 @@
 		goto error;
 	}
 
-	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
-		(unsigned long long)res.start);
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np,
+		 (unsigned long long)res.start);
 
 	priv->map = of_iomap(np, 0);
 	if (!priv->map) {
@@ -473,7 +468,7 @@
 
 	if (data->get_tbipa) {
 		for_each_child_of_node(np, tbi) {
-			if (strcmp(tbi->type, "tbi-phy") == 0) {
+			if (of_node_is_type(tbi, "tbi-phy")) {
 				dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
 					tbi);
 				break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f27f9ba..51ad864 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* drivers/net/ethernet/freescale/gianfar.c
  *
  * Gianfar Ethernet Driver
@@ -12,11 +13,6 @@
  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  * Copyright 2007 MontaVista Software, Inc.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
  *  Gianfar:  AKA Lambda Draconis, "Dragon"
  *  RA 11 31 24.2
  *  Dec +69 19 52
@@ -102,8 +98,6 @@
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
 
 #include "gianfar.h"
 
@@ -111,43 +105,6 @@
 
 const char gfar_driver_version[] = "2.0";
 
-static int gfar_enet_open(struct net_device *dev);
-static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void gfar_reset_task(struct work_struct *work);
-static void gfar_timeout(struct net_device *dev);
-static int gfar_close(struct net_device *dev);
-static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
-				int alloc_cnt);
-static int gfar_set_mac_address(struct net_device *dev);
-static int gfar_change_mtu(struct net_device *dev, int new_mtu);
-static irqreturn_t gfar_error(int irq, void *dev_id);
-static irqreturn_t gfar_transmit(int irq, void *dev_id);
-static irqreturn_t gfar_interrupt(int irq, void *dev_id);
-static void adjust_link(struct net_device *dev);
-static noinline void gfar_update_link_state(struct gfar_private *priv);
-static int init_phy(struct net_device *dev);
-static int gfar_probe(struct platform_device *ofdev);
-static int gfar_remove(struct platform_device *ofdev);
-static void free_skb_resources(struct gfar_private *priv);
-static void gfar_set_multi(struct net_device *dev);
-static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
-static void gfar_configure_serdes(struct net_device *dev);
-static int gfar_poll_rx(struct napi_struct *napi, int budget);
-static int gfar_poll_tx(struct napi_struct *napi, int budget);
-static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
-static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gfar_netpoll(struct net_device *dev);
-#endif
-int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
-static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
-static void gfar_halt_nodisable(struct gfar_private *priv);
-static void gfar_clear_exact_match(struct net_device *dev);
-static void gfar_set_mac_for_addr(struct net_device *dev, int num,
-				  const u8 *addr);
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
@@ -168,138 +125,6 @@
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
-static void gfar_init_bds(struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	struct txbd8 *txbdp;
-	u32 __iomem *rfbptr;
-	int i, j;
-
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		tx_queue = priv->tx_queue[i];
-		/* Initialize some variables in our dev structure */
-		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
-		tx_queue->dirty_tx = tx_queue->tx_bd_base;
-		tx_queue->cur_tx = tx_queue->tx_bd_base;
-		tx_queue->skb_curtx = 0;
-		tx_queue->skb_dirtytx = 0;
-
-		/* Initialize Transmit Descriptor Ring */
-		txbdp = tx_queue->tx_bd_base;
-		for (j = 0; j < tx_queue->tx_ring_size; j++) {
-			txbdp->lstatus = 0;
-			txbdp->bufPtr = 0;
-			txbdp++;
-		}
-
-		/* Set the last descriptor in the ring to indicate wrap */
-		txbdp--;
-		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
-					    TXBD_WRAP);
-	}
-
-	rfbptr = &regs->rfbptr0;
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-
-		rx_queue->next_to_clean = 0;
-		rx_queue->next_to_use = 0;
-		rx_queue->next_to_alloc = 0;
-
-		/* make sure next_to_clean != next_to_use after this
-		 * by leaving at least 1 unused descriptor
-		 */
-		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
-
-		rx_queue->rfbptr = rfbptr;
-		rfbptr += 2;
-	}
-}
-
-static int gfar_alloc_skb_resources(struct net_device *ndev)
-{
-	void *vaddr;
-	dma_addr_t addr;
-	int i, j;
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct device *dev = priv->dev;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-
-	priv->total_tx_ring_size = 0;
-	for (i = 0; i < priv->num_tx_queues; i++)
-		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
-
-	priv->total_rx_ring_size = 0;
-	for (i = 0; i < priv->num_rx_queues; i++)
-		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
-
-	/* Allocate memory for the buffer descriptors */
-	vaddr = dma_alloc_coherent(dev,
-				   (priv->total_tx_ring_size *
-				    sizeof(struct txbd8)) +
-				   (priv->total_rx_ring_size *
-				    sizeof(struct rxbd8)),
-				   &addr, GFP_KERNEL);
-	if (!vaddr)
-		return -ENOMEM;
-
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		tx_queue = priv->tx_queue[i];
-		tx_queue->tx_bd_base = vaddr;
-		tx_queue->tx_bd_dma_base = addr;
-		tx_queue->dev = ndev;
-		/* enet DMA only understands physical addresses */
-		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
-		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
-	}
-
-	/* Start the rx descriptor ring where the tx ring leaves off */
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_bd_base = vaddr;
-		rx_queue->rx_bd_dma_base = addr;
-		rx_queue->ndev = ndev;
-		rx_queue->dev = dev;
-		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
-		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
-	}
-
-	/* Setup the skbuff rings */
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		tx_queue = priv->tx_queue[i];
-		tx_queue->tx_skbuff =
-			kmalloc_array(tx_queue->tx_ring_size,
-				      sizeof(*tx_queue->tx_skbuff),
-				      GFP_KERNEL);
-		if (!tx_queue->tx_skbuff)
-			goto cleanup;
-
-		for (j = 0; j < tx_queue->tx_ring_size; j++)
-			tx_queue->tx_skbuff[j] = NULL;
-	}
-
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
-					    sizeof(*rx_queue->rx_buff),
-					    GFP_KERNEL);
-		if (!rx_queue->rx_buff)
-			goto cleanup;
-	}
-
-	gfar_init_bds(ndev);
-
-	return 0;
-
-cleanup:
-	free_skb_resources(priv);
-	return -ENOMEM;
-}
-
 static void gfar_init_tx_rx_base(struct gfar_private *priv)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -450,7 +275,7 @@
 	}
 }
 
-void gfar_configure_coalescing_all(struct gfar_private *priv)
+static void gfar_configure_coalescing_all(struct gfar_private *priv)
 {
 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 }
@@ -483,6 +308,62 @@
 	return &dev->stats;
 }
 
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table.  The table is controlled through 8 32-bit registers:
+ * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * entry 255.  This means that the 3 most significant bits in the
+ * hash index indicate which gaddr register to use, and the 5 other bits
+ * indicate which bit (assuming an IBM numbering scheme, which
+ * for PowerPC (tm) is usually the case) in the register holds
+ * the entry.
+ */
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+	u32 tempval;
+	struct gfar_private *priv = netdev_priv(dev);
+	u32 result = ether_crc(ETH_ALEN, addr);
+	int width = priv->hash_width;
+	u8 whichbit = (result >> (32 - width)) & 0x1f;
+	u8 whichreg = result >> (32 - width + 5);
+	u32 value = (1 << (31-whichbit));
+
+	tempval = gfar_read(priv->hash_regs[whichreg]);
+	tempval |= value;
+	gfar_write(priv->hash_regs[whichreg], tempval);
+}
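
To make the index split described in the comment above concrete, here is a standalone sketch for the non-extended case (width = 8, eight gaddr registers). The CRC value is hypothetical; only the shift/mask arithmetic mirrors the driver:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical CRC-32 result for a multicast address */
	uint32_t result = 0xB6D5A33Cu;
	int width = 8;			/* non-extended hash: 8 gaddr registers */
	uint8_t whichbit = (result >> (32 - width)) & 0x1f;
	uint8_t whichreg = result >> (32 - width + 5);
	uint32_t value = 1u << (31 - whichbit);

	/* top 3 bits pick the register, next 5 bits pick the bit (IBM order) */
	printf("register %u, bit %u, mask 0x%08x\n",
	       (unsigned int)whichreg, (unsigned int)whichbit,
	       (unsigned int)value);
	return 0;
}
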
+
+/* There are multiple MAC Address register pairs on some controllers
+ * This function sets the numth pair to a given address
+ */
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+				  const u8 *addr)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	u32 __iomem *macptr = &regs->macstnaddr1;
+
+	macptr += num*2;
+
+	/* For a station address of 0x12345678ABCD in transmission
+	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+	 * MACnADDR2 is set to 0x34120000.
+	 */
+	tempval = (addr[5] << 24) | (addr[4] << 16) |
+		  (addr[3] << 8)  |  addr[2];
+
+	gfar_write(macptr, tempval);
+
+	tempval = (addr[1] << 24) | (addr[0] << 16);
+
+	gfar_write(macptr+1, tempval);
+}
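
The register layout described in the comment above can be checked with a short standalone program. The sample address is the 0x12345678ABCD example from the comment, and the expected register values are the ones the comment states:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* station address 0x12345678ABCD in transmission order */
	const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t macstnaddr1, macstnaddr2;

	macstnaddr1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
		      ((uint32_t)addr[3] << 8)  |   addr[2];
	macstnaddr2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);

	/* expect MACnADDR1 = 0xCDAB7856 and MACnADDR2 = 0x34120000 */
	printf("MACnADDR1 = 0x%08X\nMACnADDR2 = 0x%08X\n",
	       (unsigned int)macstnaddr1, (unsigned int)macstnaddr2);
	return 0;
}
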
+
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
 	eth_mac_addr(dev, p);
@@ -492,23 +373,6 @@
 	return 0;
 }
 
-static const struct net_device_ops gfar_netdev_ops = {
-	.ndo_open = gfar_enet_open,
-	.ndo_start_xmit = gfar_start_xmit,
-	.ndo_stop = gfar_close,
-	.ndo_change_mtu = gfar_change_mtu,
-	.ndo_set_features = gfar_set_features,
-	.ndo_set_rx_mode = gfar_set_multi,
-	.ndo_tx_timeout = gfar_timeout,
-	.ndo_do_ioctl = gfar_ioctl,
-	.ndo_get_stats = gfar_get_stats,
-	.ndo_set_mac_address = gfar_set_mac_addr,
-	.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = gfar_netpoll,
-#endif
-};
-
 static void gfar_ints_disable(struct gfar_private *priv)
 {
 	int i;
@@ -722,16 +586,59 @@
 	int num = 0;
 
 	for_each_available_child_of_node(np, child)
-		if (!of_node_cmp(child->name, "queue-group"))
+		if (of_node_name_eq(child, "queue-group"))
 			num++;
 
 	return num;
 }
 
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 ecntrl;
+
+	ecntrl = gfar_read(&regs->ecntrl);
+
+	if (ecntrl & ECNTRL_SGMII_MODE)
+		return PHY_INTERFACE_MODE_SGMII;
+
+	if (ecntrl & ECNTRL_TBI_MODE) {
+		if (ecntrl & ECNTRL_REDUCED_MODE)
+			return PHY_INTERFACE_MODE_RTBI;
+		else
+			return PHY_INTERFACE_MODE_TBI;
+	}
+
+	if (ecntrl & ECNTRL_REDUCED_MODE) {
+		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
+			return PHY_INTERFACE_MODE_RMII;
+		}
+		else {
+			phy_interface_t interface = priv->interface;
+
+			/* This isn't autodetected right now, so it must
+			 * be set by the device tree or platform code.
+			 */
+			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+				return PHY_INTERFACE_MODE_RGMII_ID;
+
+			return PHY_INTERFACE_MODE_RGMII;
+		}
+	}
+
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+		return PHY_INTERFACE_MODE_GMII;
+
+	return PHY_INTERFACE_MODE_MII;
+}
+
 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 {
 	const char *model;
-	const char *ctype;
 	const void *mac_addr;
 	int err = 0, i;
 	struct net_device *dev = NULL;
@@ -840,7 +747,7 @@
 	/* Parse and initialize group specific information */
 	if (priv->mode == MQ_MG_MODE) {
 		for_each_available_child_of_node(np, child) {
-			if (of_node_cmp(child->name, "queue-group"))
+			if (!of_node_name_eq(child, "queue-group"))
 				continue;
 
 			err = gfar_parse_group(child, priv, model);
@@ -873,8 +780,8 @@
 
 	mac_addr = of_get_mac_address(np);
 
-	if (mac_addr)
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(dev->dev_addr, mac_addr);
 
 	if (model && !strcasecmp(model, "TSEC"))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
@@ -894,13 +801,15 @@
 				     FSL_GIANFAR_DEV_HAS_TIMER |
 				     FSL_GIANFAR_DEV_HAS_RX_FILER;
 
-	err = of_property_read_string(np, "phy-connection-type", &ctype);
-
-	/* We only care about rgmii-id.  The rest are autodetected */
-	if (err == 0 && !strcmp(ctype, "rgmii-id"))
-		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
+	/* Use PHY connection type from the DT node if one is specified there.
+	 * rgmii-id really needs to be specified. Other types can be
+	 * detected by hardware
+	 */
+	err = of_get_phy_mode(np);
+	if (err >= 0)
+		priv->interface = err;
 	else
-		priv->interface = PHY_INTERFACE_MODE_MII;
+		priv->interface = gfar_get_interface(dev);
 
 	if (of_find_property(np, "fsl,magic-packet", NULL))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
@@ -936,85 +845,6 @@
 	return err;
 }
 
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct hwtstamp_config config;
-	struct gfar_private *priv = netdev_priv(netdev);
-
-	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-		return -EFAULT;
-
-	/* reserved for future extensions */
-	if (config.flags)
-		return -EINVAL;
-
-	switch (config.tx_type) {
-	case HWTSTAMP_TX_OFF:
-		priv->hwts_tx_en = 0;
-		break;
-	case HWTSTAMP_TX_ON:
-		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
-			return -ERANGE;
-		priv->hwts_tx_en = 1;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	switch (config.rx_filter) {
-	case HWTSTAMP_FILTER_NONE:
-		if (priv->hwts_rx_en) {
-			priv->hwts_rx_en = 0;
-			reset_gfar(netdev);
-		}
-		break;
-	default:
-		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
-			return -ERANGE;
-		if (!priv->hwts_rx_en) {
-			priv->hwts_rx_en = 1;
-			reset_gfar(netdev);
-		}
-		config.rx_filter = HWTSTAMP_FILTER_ALL;
-		break;
-	}
-
-	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-		-EFAULT : 0;
-}
-
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct hwtstamp_config config;
-	struct gfar_private *priv = netdev_priv(netdev);
-
-	config.flags = 0;
-	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-	config.rx_filter = (priv->hwts_rx_en ?
-			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
-	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-		-EFAULT : 0;
-}
-
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct phy_device *phydev = dev->phydev;
-
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	if (cmd == SIOCSHWTSTAMP)
-		return gfar_hwtstamp_set(dev, rq);
-	if (cmd == SIOCGHWTSTAMP)
-		return gfar_hwtstamp_get(dev, rq);
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_mii_ioctl(phydev, rq, cmd);
-}
-
 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
 				   u32 class)
 {
@@ -1138,6 +968,2183 @@
 			 priv->errata);
 }
 
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+		priv->extended_hash = 1;
+		priv->hash_width = 9;
+
+		priv->hash_regs[0] = &regs->igaddr0;
+		priv->hash_regs[1] = &regs->igaddr1;
+		priv->hash_regs[2] = &regs->igaddr2;
+		priv->hash_regs[3] = &regs->igaddr3;
+		priv->hash_regs[4] = &regs->igaddr4;
+		priv->hash_regs[5] = &regs->igaddr5;
+		priv->hash_regs[6] = &regs->igaddr6;
+		priv->hash_regs[7] = &regs->igaddr7;
+		priv->hash_regs[8] = &regs->gaddr0;
+		priv->hash_regs[9] = &regs->gaddr1;
+		priv->hash_regs[10] = &regs->gaddr2;
+		priv->hash_regs[11] = &regs->gaddr3;
+		priv->hash_regs[12] = &regs->gaddr4;
+		priv->hash_regs[13] = &regs->gaddr5;
+		priv->hash_regs[14] = &regs->gaddr6;
+		priv->hash_regs[15] = &regs->gaddr7;
+
+	} else {
+		priv->extended_hash = 0;
+		priv->hash_width = 8;
+
+		priv->hash_regs[0] = &regs->gaddr0;
+		priv->hash_regs[1] = &regs->gaddr1;
+		priv->hash_regs[2] = &regs->gaddr2;
+		priv->hash_regs[3] = &regs->gaddr3;
+		priv->hash_regs[4] = &regs->gaddr4;
+		priv->hash_regs[5] = &regs->gaddr5;
+		priv->hash_regs[6] = &regs->gaddr6;
+		priv->hash_regs[7] = &regs->gaddr7;
+	}
+}
+
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+	u32 res;
+
+	/* Normally TSEC should not hang on GRS commands, so we should
+	 * actually wait for IEVENT_GRSC flag.
+	 */
+	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
+		return 0;
+
+	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+	 * and the Rx can be safely reset.
+	 */
+	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+	res &= 0x7f807f80;
+	if ((res & 0xffff) == (res >> 16))
+		return 1;
+
+	return 0;
+}
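
To make the A002 idle test above concrete: the 0x7f807f80 mask keeps bits 7-14 of each 16-bit half of the register, so comparing the low halfword with the shifted-down high halfword compares bits 7-14 against bits 23-30. A standalone sketch with hypothetical register snapshots (the helper name is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the A002 idle test: bits 23-30 must equal bits 7-14. */
static int rx_is_idle(uint32_t res)
{
	res &= 0x7f807f80;	/* keep bits 7-14 of each halfword */
	return (res & 0xffff) == (res >> 16);
}

int main(void)
{
	printf("%d\n", rx_is_idle(0x12801280));	/* halves match -> idle */
	printf("%d\n", rx_is_idle(0x12800080));	/* halves differ -> busy */
	return 0;
}
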
+
+/* Halt the receive and transmit queues */
+static void gfar_halt_nodisable(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	unsigned int timeout;
+	int stopped;
+
+	gfar_ints_disable(priv);
+
+	if (gfar_is_dma_stopped(priv))
+		return;
+
+	/* Stop the DMA, and wait for it to stop */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&regs->dmactrl, tempval);
+
+retry:
+	timeout = 1000;
+	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+		cpu_relax();
+		timeout--;
+	}
+
+	if (!timeout)
+		stopped = gfar_is_dma_stopped(priv);
+
+	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+	    !__gfar_is_rx_idle(priv))
+		goto retry;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+
+	/* Disable the Rx/Tx hw queues */
+	gfar_write(&regs->rqueue, 0);
+	gfar_write(&regs->tqueue, 0);
+
+	mdelay(10);
+
+	gfar_halt_nodisable(priv);
+
+	/* Disable Rx/Tx DMA */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+}
+
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
+{
+	struct txbd8 *txbdp;
+	struct gfar_private *priv = netdev_priv(tx_queue->dev);
+	int i, j;
+
+	txbdp = tx_queue->tx_bd_base;
+
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
+			continue;
+
+		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
+		txbdp->lstatus = 0;
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+		     j++) {
+			txbdp++;
+			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				       be16_to_cpu(txbdp->length),
+				       DMA_TO_DEVICE);
+		}
+		txbdp++;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
+	}
+	kfree(tx_queue->tx_skbuff);
+	tx_queue->tx_skbuff = NULL;
+}
+
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+	int i;
+
+	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+	dev_kfree_skb(rx_queue->skb);
+
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
+		rxbdp->lstatus = 0;
+		rxbdp->bufPtr = 0;
+		rxbdp++;
+
+		if (!rxb->page)
+			continue;
+
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rxb->page);
+
+		rxb->page = NULL;
+	}
+
+	kfree(rx_queue->rx_buff);
+	rx_queue->rx_buff = NULL;
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff
+ */
+static void free_skb_resources(struct gfar_private *priv)
+{
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
+
+	/* Go through all the buffer descriptors and free their data buffers */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		struct netdev_queue *txq;
+
+		tx_queue = priv->tx_queue[i];
+		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
+		if (tx_queue->tx_skbuff)
+			free_skb_tx_queue(tx_queue);
+		netdev_tx_reset_queue(txq);
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		if (rx_queue->rx_buff)
+			free_skb_rx_queue(rx_queue);
+	}
+
+	dma_free_coherent(priv->dev,
+			  sizeof(struct txbd8) * priv->total_tx_ring_size +
+			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			  priv->tx_queue[0]->tx_bd_base,
+			  priv->tx_queue[0]->tx_bd_dma_base);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	netif_tx_stop_all_queues(dev);
+
+	smp_mb__before_atomic();
+	set_bit(GFAR_DOWN, &priv->state);
+	smp_mb__after_atomic();
+
+	disable_napi(priv);
+
+	/* disable ints and gracefully shut down Rx/Tx DMA */
+	gfar_halt(priv);
+
+	phy_stop(dev->phydev);
+
+	free_skb_resources(priv);
+}
+
+static void gfar_start(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	int i = 0;
+
+	/* Enable Rx/Tx hw queues */
+	gfar_write(&regs->rqueue, priv->rqueue);
+	gfar_write(&regs->tqueue, priv->tqueue);
+
+	/* Initialize DMACTRL to have WWR and WOP */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval |= DMACTRL_INIT_SETTINGS;
+	gfar_write(&regs->dmactrl, tempval);
+
+	/* Make sure we aren't stopped */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&regs->dmactrl, tempval);
+
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear THLT/RHLT, so that the DMA starts polling now */
+		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+	}
+
+	/* Enable Rx/Tx DMA */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+
+	gfar_ints_enable(priv);
+
+	netif_trans_update(priv->ndev); /* prevent tx timeout */
+}
+
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
+{
+	struct page *page;
+	dma_addr_t addr;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
+
+	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+		__free_page(page);
+
+		return false;
+	}
+
+	rxb->dma = addr;
+	rxb->page = page;
+	rxb->page_offset = 0;
+
+	return true;
+}
+
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
+{
+	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+	atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt)
+{
+	struct rxbd8 *bdp;
+	struct gfar_rx_buff *rxb;
+	int i;
+
+	i = rx_queue->next_to_use;
+	bdp = &rx_queue->rx_bd_base[i];
+	rxb = &rx_queue->rx_buff[i];
+
+	while (alloc_cnt--) {
+		/* try reuse page */
+		if (unlikely(!rxb->page)) {
+			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+				gfar_rx_alloc_err(rx_queue);
+				break;
+			}
+		}
+
+		/* Setup the new RxBD */
+		gfar_init_rxbdp(rx_queue, bdp,
+				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+		/* Update to the next pointer */
+		bdp++;
+		rxb++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size)) {
+			i = 0;
+			bdp = rx_queue->rx_bd_base;
+			rxb = rx_queue->rx_buff;
+		}
+	}
+
+	rx_queue->next_to_use = i;
+	rx_queue->next_to_alloc = i;
+}
+
+static void gfar_init_bds(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct txbd8 *txbdp;
+	u32 __iomem *rfbptr;
+	int i, j;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		/* Initialize some variables in our dev structure */
+		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+		tx_queue->dirty_tx = tx_queue->tx_bd_base;
+		tx_queue->cur_tx = tx_queue->tx_bd_base;
+		tx_queue->skb_curtx = 0;
+		tx_queue->skb_dirtytx = 0;
+
+		/* Initialize Transmit Descriptor Ring */
+		txbdp = tx_queue->tx_bd_base;
+		for (j = 0; j < tx_queue->tx_ring_size; j++) {
+			txbdp->lstatus = 0;
+			txbdp->bufPtr = 0;
+			txbdp++;
+		}
+
+		/* Set the last descriptor in the ring to indicate wrap */
+		txbdp--;
+		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+					    TXBD_WRAP);
+	}
+
+	rfbptr = &regs->rfbptr0;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+
+		rx_queue->next_to_clean = 0;
+		rx_queue->next_to_use = 0;
+		rx_queue->next_to_alloc = 0;
+
+		/* make sure next_to_clean != next_to_use after this
+		 * by leaving at least 1 unused descriptor
+		 */
+		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
+
+		rx_queue->rfbptr = rfbptr;
+		rfbptr += 2;
+	}
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+	void *vaddr;
+	dma_addr_t addr;
+	int i, j;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct device *dev = priv->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	priv->total_tx_ring_size = 0;
+	for (i = 0; i < priv->num_tx_queues; i++)
+		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+	priv->total_rx_ring_size = 0;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+	/* Allocate memory for the buffer descriptors */
+	vaddr = dma_alloc_coherent(dev,
+				   (priv->total_tx_ring_size *
+				    sizeof(struct txbd8)) +
+				   (priv->total_rx_ring_size *
+				    sizeof(struct rxbd8)),
+				   &addr, GFP_KERNEL);
+	if (!vaddr)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_bd_base = vaddr;
+		tx_queue->tx_bd_dma_base = addr;
+		tx_queue->dev = ndev;
+		/* enet DMA only understands physical addresses */
+		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+	}
+
+	/* Start the rx descriptor ring where the tx ring leaves off */
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_bd_base = vaddr;
+		rx_queue->rx_bd_dma_base = addr;
+		rx_queue->ndev = ndev;
+		rx_queue->dev = dev;
+		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+	}
+
+	/* Setup the skbuff rings */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_skbuff =
+			kmalloc_array(tx_queue->tx_ring_size,
+				      sizeof(*tx_queue->tx_skbuff),
+				      GFP_KERNEL);
+		if (!tx_queue->tx_skbuff)
+			goto cleanup;
+
+		for (j = 0; j < tx_queue->tx_ring_size; j++)
+			tx_queue->tx_skbuff[j] = NULL;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+					    sizeof(*rx_queue->rx_buff),
+					    GFP_KERNEL);
+		if (!rx_queue->rx_buff)
+			goto cleanup;
+	}
+
+	gfar_init_bds(ndev);
+
+	return 0;
+
+cleanup:
+	free_skb_resources(priv);
+	return -ENOMEM;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	int err;
+
+	gfar_mac_reset(priv);
+
+	err = gfar_alloc_skb_resources(ndev);
+	if (err)
+		return err;
+
+	gfar_init_tx_rx_base(priv);
+
+	smp_mb__before_atomic();
+	clear_bit(GFAR_DOWN, &priv->state);
+	smp_mb__after_atomic();
+
+	/* Start Rx/Tx DMA and enable the interrupts */
+	gfar_start(priv);
+
+	/* force link state update after mac reset */
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	phy_start(ndev->phydev);
+
+	enable_napi(priv);
+
+	netif_tx_wake_all_queues(ndev);
+
+	return 0;
+}
+
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
+	u32 val = 0;
+
+	if (!phydev->duplex)
+		return val;
+
+	if (!priv->pause_aneg_en) {
+		if (priv->tx_pause_en)
+			val |= MACCFG1_TX_FLOW;
+		if (priv->rx_pause_en)
+			val |= MACCFG1_RX_FLOW;
+	} else {
+		u16 lcl_adv, rmt_adv;
+		u8 flowctrl;
+		/* get link partner capabilities */
+		rmt_adv = 0;
+		if (phydev->pause)
+			rmt_adv = LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+
+		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (flowctrl & FLOW_CTRL_TX)
+			val |= MACCFG1_TX_FLOW;
+		if (flowctrl & FLOW_CTRL_RX)
+			val |= MACCFG1_RX_FLOW;
+	}
+
+	return val;
+}
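
The else branch above resolves pause autonegotiation with mii_resolve_flowctrl_fdx(). The following standalone sketch approximates that IEEE 802.3 Annex 28B resolution; it is an illustration, not the in-tree helper, and the #define values mirror the MII headers (LPA_PAUSE_* share the same bit positions as ADVERTISE_PAUSE_*, so one pair of masks suffices):

#include <stdio.h>
#include <stdint.h>

#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

/* Sketch of full-duplex pause resolution per 802.3 Annex 28B */
static uint8_t resolve_pause_fdx(uint16_t lcl_adv, uint16_t rmt_adv)
{
	uint8_t flowctrl = 0;

	if (lcl_adv & rmt_adv & ADVERTISE_PAUSE_CAP) {
		flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcl_adv & rmt_adv & ADVERTISE_PAUSE_ASYM) {
		if (lcl_adv & ADVERTISE_PAUSE_CAP)
			flowctrl = FLOW_CTRL_RX;	/* act on partner's pause frames */
		else if (rmt_adv & ADVERTISE_PAUSE_CAP)
			flowctrl = FLOW_CTRL_TX;	/* send pause frames to partner */
	}
	return flowctrl;
}

int main(void)
{
	/* both ends advertise symmetric pause -> Tx and Rx flow control */
	printf("0x%02x\n", (unsigned int)resolve_pause_fdx(ADVERTISE_PAUSE_CAP,
							   ADVERTISE_PAUSE_CAP));
	return 0;
}
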
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
+
+	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+		return;
+
+	if (phydev->link) {
+		u32 tempval1 = gfar_read(&regs->maccfg1);
+		u32 tempval = gfar_read(&regs->maccfg2);
+		u32 ecntrl = gfar_read(&regs->ecntrl);
+		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
+
+		if (phydev->duplex != priv->oldduplex) {
+			if (!(phydev->duplex))
+				tempval &= ~(MACCFG2_FULL_DUPLEX);
+			else
+				tempval |= MACCFG2_FULL_DUPLEX;
+
+			priv->oldduplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->oldspeed) {
+			switch (phydev->speed) {
+			case 1000:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
+				break;
+			case 100:
+			case 10:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+				/* Reduced mode distinguishes
+				 * between 10 and 100
+				 */
+				if (phydev->speed == SPEED_100)
+					ecntrl |= ECNTRL_R100;
+				else
+					ecntrl &= ~(ECNTRL_R100);
+				break;
+			default:
+				netif_warn(priv, link, priv->ndev,
+					   "Ack!  Speed (%d) is not 10/100/1000!\n",
+					   phydev->speed);
+				break;
+			}
+
+			priv->oldspeed = phydev->speed;
+		}
+
+		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+		tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+		/* Turn last free buffer recording on */
+		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+			for (i = 0; i < priv->num_rx_queues; i++) {
+				u32 bdp_dma;
+
+				rx_queue = priv->rx_queue[i];
+				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, bdp_dma);
+			}
+
+			priv->tx_actual_en = 1;
+		}
+
+		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+			priv->tx_actual_en = 0;
+
+		gfar_write(&regs->maccfg1, tempval1);
+		gfar_write(&regs->maccfg2, tempval);
+		gfar_write(&regs->ecntrl, ecntrl);
+
+		if (!priv->oldlink)
+			priv->oldlink = 1;
+
+	} else if (priv->oldlink) {
+		priv->oldlink = 0;
+		priv->oldspeed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (netif_msg_link(priv))
+		phy_print_status(phydev);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state.  The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
+
+	if (unlikely(phydev->link != priv->oldlink ||
+		     (phydev->link && (phydev->duplex != priv->oldduplex ||
+				       phydev->speed != priv->oldspeed))))
+		gfar_update_link_state(priv);
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip.  We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register.  We assume
+ * that the TBIPA register is valid.  Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void gfar_configure_serdes(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_device *tbiphy;
+
+	if (!priv->tbi_node) {
+		dev_warn(&dev->dev, "error: SGMII mode requires that the "
+				    "device tree specify a tbi-handle\n");
+		return;
+	}
+
+	tbiphy = of_phy_find_device(priv->tbi_node);
+	if (!tbiphy) {
+		dev_err(&dev->dev, "error: Could not get TBI device\n");
+		return;
+	}
+
+	/* If the link is already up, we must already be ok, and don't need to
+	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
+	 * everything for us?  Resetting it takes the link down and requires
+	 * several seconds for it to come back.
+	 */
+	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+		put_device(&tbiphy->mdio.dev);
+		return;
+	}
+
+	/* Single clk mode, mii mode off (for serdes communication) */
+	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+
+	phy_write(tbiphy, MII_ADVERTISE,
+		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+		  ADVERTISE_1000XPSE_ASYM);
+
+	phy_write(tbiphy, MII_BMCR,
+		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+		  BMCR_SPEED1000);
+
+	put_device(&tbiphy->mdio.dev);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *dev)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+	struct gfar_private *priv = netdev_priv(dev);
+	phy_interface_t interface = priv->interface;
+	struct phy_device *phydev;
+	struct ethtool_eee edata;
+
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+				interface);
+	if (!phydev) {
+		dev_err(&dev->dev, "could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	if (interface == PHY_INTERFACE_MODE_SGMII)
+		gfar_configure_serdes(dev);
+
+	/* Remove any features not supported by the controller */
+	linkmode_and(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
+
+	/* Add support for flow control */
+	phy_support_asym_pause(phydev);
+
+	/* disable EEE autoneg, EEE not supported by eTSEC */
+	memset(&edata, 0, sizeof(struct ethtool_eee));
+	phy_ethtool_set_eee(phydev, &edata);
+
+	return 0;
+}
+
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
+{
+	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
+
+	memset(fcb, 0, GMAC_FCB_LEN);
+
+	return fcb;
+}
+
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+				    int fcb_length)
+{
+	/* If we're here, it's an IP packet with a TCP or UDP
+	 * payload.  We set it up for checksumming, using a pseudo-header
+	 * we provide
+	 */
+	u8 flags = TXFCB_DEFAULT;
+
+	/* Tell the controller what the protocol is
+	 * And provide the already calculated phcs
+	 */
+	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+		flags |= TXFCB_UDP;
+		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
+	} else
+		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
+
+	/* l3os is the distance between the start of the
+	 * frame (skb->data) and the start of the IP hdr.
+	 * l4os is the distance between the start of the
+	 * l3 hdr and the l4 hdr
+	 */
+	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
+	fcb->l4os = skb_network_header_len(skb);
+
+	fcb->flags = flags;
+}
+
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+{
+	fcb->flags |= TXFCB_VLN;
+	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
+}
+
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+				      struct txbd8 *base, int ring_size)
+{
+	struct txbd8 *new_bd = bdp + stride;
+
+	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+				      int ring_size)
+{
+	return skip_txbd(bdp, 1, base, ring_size);
+}
+
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+				       unsigned long fcb_addr)
+{
+	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+	       (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+				       unsigned int len)
+{
+	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+	       (len > 2500));
+}
+
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is hooked up as the driver's ndo_start_xmit handler
+ */
+static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct netdev_queue *txq;
+	struct gfar __iomem *regs = NULL;
+	struct txfcb *fcb = NULL;
+	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
+	u32 lstatus;
+	skb_frag_t *frag;
+	int i, rq = 0;
+	int do_tstamp, do_csum, do_vlan;
+	u32 bufaddr;
+	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
+
+	rq = skb->queue_mapping;
+	tx_queue = priv->tx_queue[rq];
+	txq = netdev_get_tx_queue(dev, rq);
+	base = tx_queue->tx_bd_base;
+	regs = tx_queue->grp->regs;
+
+	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+	do_vlan = skb_vlan_tag_present(skb);
+	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		    priv->hwts_tx_en;
+
+	if (do_csum || do_vlan)
+		fcb_len = GMAC_FCB_LEN;
+
+	/* check if time stamp should be generated */
+	if (unlikely(do_tstamp))
+		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+
+	/* make space for additional header when fcb is needed */
+	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
+		struct sk_buff *skb_new;
+
+		skb_new = skb_realloc_headroom(skb, fcb_len);
+		if (!skb_new) {
+			dev->stats.tx_errors++;
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+
+		if (skb->sk)
+			skb_set_owner_w(skb_new, skb->sk);
+		dev_consume_skb_any(skb);
+		skb = skb_new;
+	}
+
+	/* total number of fragments in the SKB */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	/* calculate the required number of TxBDs for this skb */
+	if (unlikely(do_tstamp))
+		nr_txbds = nr_frags + 2;
+	else
+		nr_txbds = nr_frags + 1;
+
+	/* check if there is space to queue this packet */
+	if (nr_txbds > tx_queue->num_txbdfree) {
+		/* no space, stop the queue */
+		netif_tx_stop_queue(txq);
+		dev->stats.tx_fifo_errors++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Update transmit stats */
+	bytes_sent = skb->len;
+	tx_queue->stats.tx_bytes += bytes_sent;
+	/* keep Tx bytes on wire for BQL accounting */
+	GFAR_CB(skb)->bytes_sent = bytes_sent;
+	tx_queue->stats.tx_packets++;
+
+	txbdp = txbdp_start = tx_queue->cur_tx;
+	lstatus = be32_to_cpu(txbdp->lstatus);
+
+	/* Add TxPAL between FCB and frame if required */
+	if (unlikely(do_tstamp)) {
+		skb_push(skb, GMAC_TXPAL_LEN);
+		memset(skb->data, 0, GMAC_TXPAL_LEN);
+	}
+
+	/* Add TxFCB if required */
+	if (fcb_len) {
+		fcb = gfar_add_fcb(skb);
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
+
+	/* Set up checksumming */
+	if (do_csum) {
+		gfar_tx_checksum(skb, fcb, fcb_len);
+
+		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
+			__skb_pull(skb, GMAC_FCB_LEN);
+			skb_checksum_help(skb);
+			if (do_vlan || do_tstamp) {
+				/* put back a new fcb for vlan/tstamp TOE */
+				fcb = gfar_add_fcb(skb);
+			} else {
+				/* Tx TOE not used */
+				lstatus &= ~(BD_LFLAG(TXBD_TOE));
+				fcb = NULL;
+			}
+		}
+	}
+
+	if (do_vlan)
+		gfar_tx_vlan(skb, fcb);
+
+	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+				 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+		goto dma_map_err;
+
+	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
+
+	/* Time stamp insertion requires one additional TxBD */
+	if (unlikely(do_tstamp))
+		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+						 tx_queue->tx_ring_size);
+
+	if (likely(!nr_frags)) {
+		if (likely(!do_tstamp))
+			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+	} else {
+		u32 lstatus_start = lstatus;
+
+		/* Place the fragment addresses and lengths into the TxBDs */
+		frag = &skb_shinfo(skb)->frags[0];
+		for (i = 0; i < nr_frags; i++, frag++) {
+			unsigned int size;
+
+			/* Point at the next BD, wrapping as needed */
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+			size = skb_frag_size(frag);
+
+			lstatus = be32_to_cpu(txbdp->lstatus) | size |
+				  BD_LFLAG(TXBD_READY);
+
+			/* Handle the last BD specially */
+			if (i == nr_frags - 1)
+				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
+						   size, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+				goto dma_map_err;
+
+			/* set the TxBD length and buffer pointer */
+			txbdp->bufPtr = cpu_to_be32(bufaddr);
+			txbdp->lstatus = cpu_to_be32(lstatus);
+		}
+
+		lstatus = lstatus_start;
+	}
+
+	/* If time stamping is requested, one additional TxBD must be set up. The
+	 * first TxBD points to the FCB and must have a data length of
+	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+	 * the full frame length.
+	 */
+	if (unlikely(do_tstamp)) {
+		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+		bufaddr += fcb_len;
+
+		lstatus_ts |= BD_LFLAG(TXBD_READY) |
+			      (skb_headlen(skb) - fcb_len);
+		if (!nr_frags)
+			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+
+		/* Setup tx hardware time stamping */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		fcb->ptp = 1;
+	} else {
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	}
+
+	netdev_tx_sent_queue(txq, bytes_sent);
+
+	gfar_wmb();
+
+	txbdp_start->lstatus = cpu_to_be32(lstatus);
+
+	gfar_wmb(); /* force lstatus write before tx_skbuff */
+
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
+	/* Update the current skb pointer to the next entry we will use
+	 * (wrapping if necessary)
+	 */
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+	/* We can work in parallel with gfar_clean_tx_ring(), except
+	 * when modifying num_txbdfree. Note that we didn't grab the lock
+	 * when we were reading the num_txbdfree and checking for available
+	 * space; that's because outside of this function it can only grow.
+	 */
+	spin_lock_bh(&tx_queue->txlock);
+	/* reduce TxBD free count */
+	tx_queue->num_txbdfree -= (nr_txbds);
+	spin_unlock_bh(&tx_queue->txlock);
+
+	/* If the next BD still needs to be cleaned up, then the bds
+	 * are full.  We need to tell the kernel to stop sending us stuff.
+	 */
+	if (!tx_queue->num_txbdfree) {
+		netif_tx_stop_queue(txq);
+
+		dev->stats.tx_fifo_errors++;
+	}
+
+	/* Tell the DMA to go go go */
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
+
+	return NETDEV_TX_OK;
+
+dma_map_err:
+	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+	if (do_tstamp)
+		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+	for (i = 0; i < nr_frags; i++) {
+		lstatus = be32_to_cpu(txbdp->lstatus);
+		if (!(lstatus & BD_LFLAG(TXBD_READY)))
+			break;
+
+		lstatus &= ~BD_LFLAG(TXBD_READY);
+		txbdp->lstatus = cpu_to_be32(lstatus);
+		bufaddr = be32_to_cpu(txbdp->bufPtr);
+		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
+			       DMA_TO_DEVICE);
+		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+	}
+	gfar_wmb();
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Changes the mac address if the controller is not running. */
+static int gfar_set_mac_address(struct net_device *dev)
+{
+	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+	return 0;
+}
+
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+		cpu_relax();
+
+	if (dev->flags & IFF_UP)
+		stop_gfar(dev);
+
+	dev->mtu = new_mtu;
+
+	if (dev->flags & IFF_UP)
+		startup_gfar(dev);
+
+	clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+	return 0;
+}
+
+static void reset_gfar(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+
+	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+		cpu_relax();
+
+	stop_gfar(ndev);
+	startup_gfar(ndev);
+
+	clear_bit_unlock(GFAR_RESETTING, &priv->state);
+}
+
+/* gfar_reset_task gets scheduled when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures and
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
+{
+	struct gfar_private *priv = container_of(work, struct gfar_private,
+						 reset_task);
+	reset_gfar(priv->ndev);
+}
+
+static void gfar_timeout(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	dev->stats.tx_errors++;
+	schedule_work(&priv->reset_task);
+}
+
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	struct gfar_private *priv = netdev_priv(netdev);
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		priv->hwts_tx_en = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+			return -ERANGE;
+		priv->hwts_tx_en = 1;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		if (priv->hwts_rx_en) {
+			priv->hwts_rx_en = 0;
+			reset_gfar(netdev);
+		}
+		break;
+	default:
+		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+			return -ERANGE;
+		if (!priv->hwts_rx_en) {
+			priv->hwts_rx_en = 1;
+			reset_gfar(netdev);
+		}
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	}
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	struct gfar_private *priv = netdev_priv(netdev);
+
+	config.flags = 0;
+	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	config.rx_filter = (priv->hwts_rx_en ?
+			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct phy_device *phydev = dev->phydev;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (cmd == SIOCSHWTSTAMP)
+		return gfar_hwtstamp_set(dev, rq);
+	if (cmd == SIOCGHWTSTAMP)
+		return gfar_hwtstamp_get(dev, rq);
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+/* Reclaim completed Tx descriptors and free their skbs (NAPI Tx cleanup) */
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+{
+	struct net_device *dev = tx_queue->dev;
+	struct netdev_queue *txq;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct txbd8 *bdp, *next = NULL;
+	struct txbd8 *lbdp = NULL;
+	struct txbd8 *base = tx_queue->tx_bd_base;
+	struct sk_buff *skb;
+	int skb_dirtytx;
+	int tx_ring_size = tx_queue->tx_ring_size;
+	int frags = 0, nr_txbds = 0;
+	int i;
+	int howmany = 0;
+	int tqi = tx_queue->qindex;
+	unsigned int bytes_sent = 0;
+	u32 lstatus;
+	size_t buflen;
+
+	txq = netdev_get_tx_queue(dev, tqi);
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
+
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+
+		frags = skb_shinfo(skb)->nr_frags;
+
+		/* When time stamping, one additional TxBD must be freed.
+		 * Also, we need to dma_unmap_single() the TxPAL.
+		 */
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+			nr_txbds = frags + 2;
+		else
+			nr_txbds = frags + 1;
+
+		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
+
+		lstatus = be32_to_cpu(lbdp->lstatus);
+
+		/* Only clean completed frames */
+		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+		    (lstatus & BD_LENGTH_MASK))
+			break;
+
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+			next = next_txbd(bdp, base, tx_ring_size);
+			buflen = be16_to_cpu(next->length) +
+				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+		} else
+			buflen = be16_to_cpu(bdp->length);
+
+		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
+				 buflen, DMA_TO_DEVICE);
+
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+			struct skb_shared_hwtstamps shhwtstamps;
+			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+					  ~0x7UL);
+
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
+			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
+			skb_tstamp_tx(skb, &shhwtstamps);
+			gfar_clear_txbd_status(bdp);
+			bdp = next;
+		}
+
+		gfar_clear_txbd_status(bdp);
+		bdp = next_txbd(bdp, base, tx_ring_size);
+
+		for (i = 0; i < frags; i++) {
+			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+				       be16_to_cpu(bdp->length),
+				       DMA_TO_DEVICE);
+			gfar_clear_txbd_status(bdp);
+			bdp = next_txbd(bdp, base, tx_ring_size);
+		}
+
+		bytes_sent += GFAR_CB(skb)->bytes_sent;
+
+		dev_kfree_skb_any(skb);
+
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
+
+		skb_dirtytx = (skb_dirtytx + 1) &
+			      TX_RING_MOD_MASK(tx_ring_size);
+
+		howmany++;
+		spin_lock(&tx_queue->txlock);
+		tx_queue->num_txbdfree += nr_txbds;
+		spin_unlock(&tx_queue->txlock);
+	}
+
+	/* If we freed a buffer, we can restart transmission, if necessary */
+	if (tx_queue->num_txbdfree &&
+	    netif_tx_queue_stopped(txq) &&
+	    !(test_bit(GFAR_DOWN, &priv->state)))
+		netif_wake_subqueue(priv->ndev, tqi);
+
+	/* Update dirty indicators */
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
+
+	netdev_tx_completed_queue(txq, howmany, bytes_sent);
+}
+
+static void count_errors(u32 lstatus, struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	/* If the packet was truncated, none of the other errors matter */
+	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
+		stats->rx_length_errors++;
+
+		atomic64_inc(&estats->rx_trunc);
+
+		return;
+	}
+	/* Count the errors, if there were any */
+	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
+		stats->rx_length_errors++;
+
+		if (lstatus & BD_LFLAG(RXBD_LARGE))
+			atomic64_inc(&estats->rx_large);
+		else
+			atomic64_inc(&estats->rx_short);
+	}
+	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
+		stats->rx_frame_errors++;
+		atomic64_inc(&estats->rx_nonoctet);
+	}
+	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
+		atomic64_inc(&estats->rx_crcerr);
+		stats->rx_crc_errors++;
+	}
+	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
+		atomic64_inc(&estats->rx_overrun);
+		stats->rx_over_errors++;
+	}
+}
+
+static irqreturn_t gfar_receive(int irq, void *grp_id)
+{
+	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+	unsigned long flags;
+	u32 imask, ievent;
+
+	ievent = gfar_read(&grp->regs->ievent);
+
+	if (unlikely(ievent & IEVENT_FGPI)) {
+		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+		return IRQ_HANDLED;
+	}
+
+	if (likely(napi_schedule_prep(&grp->napi_rx))) {
+		spin_lock_irqsave(&grp->grplock, flags);
+		imask = gfar_read(&grp->regs->imask);
+		imask &= IMASK_RX_DISABLED;
+		gfar_write(&grp->regs->imask, imask);
+		spin_unlock_irqrestore(&grp->grplock, flags);
+		__napi_schedule(&grp->napi_rx);
+	} else {
+		/* Clear IEVENT, so interrupts aren't called again
+		 * because of the packets that have already arrived.
+		 */
+		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+	unsigned long flags;
+	u32 imask;
+
+	if (likely(napi_schedule_prep(&grp->napi_tx))) {
+		spin_lock_irqsave(&grp->grplock, flags);
+		imask = gfar_read(&grp->regs->imask);
+		imask &= IMASK_TX_DISABLED;
+		gfar_write(&grp->regs->imask, imask);
+		spin_unlock_irqrestore(&grp->grplock, flags);
+		__napi_schedule(&grp->napi_tx);
+	} else {
+		/* Clear IEVENT, so interrupts aren't called again
+		 * because of the packets that have already arrived.
+		 */
+		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+			     struct sk_buff *skb, bool first)
+{
+	int size = lstatus & BD_LENGTH_MASK;
+	struct page *page = rxb->page;
+
+	if (likely(first)) {
+		skb_put(skb, size);
+	} else {
+		/* the last fragment's length contains the full frame length */
+		if (lstatus & BD_LFLAG(RXBD_LAST))
+			size -= skb->len;
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				rxb->page_offset + RXBUF_ALIGNMENT,
+				size, GFAR_RXB_TRUESIZE);
+	}
+
+	/* try reuse page */
+	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
+		return false;
+
+	/* change offset to the other half */
+	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+	page_ref_inc(page);
+
+	return true;
+}
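
The recycling scheme above splits each receive page into two buffer-sized halves and flips between them by XOR-ing the page offset. A tiny standalone illustration, assuming GFAR_RXB_TRUESIZE is half of a 4 KiB page:

#include <stdio.h>

#define GFAR_RXB_TRUESIZE 2048	/* assumed: half of a 4 KiB page */

int main(void)
{
	unsigned int page_offset = 0;
	int i;

	/* each refill hands out the half the hardware is not using */
	for (i = 0; i < 4; i++) {
		printf("fill %d uses offset %u\n", i, page_offset);
		page_offset ^= GFAR_RXB_TRUESIZE;
	}
	return 0;
}
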
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+			       struct gfar_rx_buff *old_rxb)
+{
+	struct gfar_rx_buff *new_rxb;
+	u16 nta = rxq->next_to_alloc;
+
+	new_rxb = &rxq->rx_buff[nta];
+
+	/* find next buf that can reuse a page */
+	nta++;
+	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+	/* copy page reference */
+	*new_rxb = *old_rxb;
+
+	/* sync for use by the device */
+	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+					 old_rxb->page_offset,
+					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+					    u32 lstatus, struct sk_buff *skb)
+{
+	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+	struct page *page = rxb->page;
+	bool first = false;
+
+	if (likely(!skb)) {
+		void *buff_addr = page_address(page) + rxb->page_offset;
+
+		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+		if (unlikely(!skb)) {
+			gfar_rx_alloc_err(rx_queue);
+			return NULL;
+		}
+		skb_reserve(skb, RXBUF_ALIGNMENT);
+		first = true;
+	}
+
+	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+		/* reuse the free half of the page */
+		gfar_reuse_rx_page(rx_queue, rxb);
+	} else {
+		/* page cannot be reused, unmap it */
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	/* clear rxb content */
+	rxb->page = NULL;
+
+	return skb;
+}
+
+static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
+{
+	/* If valid headers were found, and valid sums
+	 * were verified, then we tell the kernel that no
+	 * checksumming is necessary.  Otherwise, let the stack verify it.
+	 */
+	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+	    (RXFCB_CIP | RXFCB_CTU))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb_checksum_none_assert(skb);
+}
+
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct rxfcb *fcb = NULL;
+
+	/* fcb is at the beginning of the frame, if it exists */
+	fcb = (struct rxfcb *)skb->data;
+
+	/* Remove the FCB from the skb
+	 * Remove the padded bytes, if there are any
+	 */
+	if (priv->uses_rxfcb)
+		skb_pull(skb, GMAC_FCB_LEN);
+
+	/* Get receive timestamp from the skb */
+	if (priv->hwts_rx_en) {
+		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+		u64 *ns = (u64 *) skb->data;
+
+		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
+	}
+
+	if (priv->padding)
+		skb_pull(skb, priv->padding);
+
+	/* Trim off the FCS */
+	pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+	if (ndev->features & NETIF_F_RXCSUM)
+		gfar_rx_checksum(skb, fcb);
+
+	/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
+	 * Even if vlan rx accel is disabled, on some chips
+	 * RXFCB_VLN is pseudo randomly set.
+	 */
+	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(fcb->vlctl));
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
+ */
+static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+			      int rx_work_limit)
+{
+	struct net_device *ndev = rx_queue->ndev;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct rxbd8 *bdp;
+	int i, howmany = 0;
+	struct sk_buff *skb = rx_queue->skb;
+	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+	unsigned int total_bytes = 0, total_pkts = 0;
+
+	/* Get the first full descriptor */
+	i = rx_queue->next_to_clean;
+
+	while (rx_work_limit--) {
+		u32 lstatus;
+
+		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+			cleaned_cnt = 0;
+		}
+
+		bdp = &rx_queue->rx_bd_base[i];
+		lstatus = be32_to_cpu(bdp->lstatus);
+		if (lstatus & BD_LFLAG(RXBD_EMPTY))
+			break;
+
+		/* order rx buffer descriptor reads */
+		rmb();
+
+		/* fetch next to clean buffer from the ring */
+		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+		if (unlikely(!skb))
+			break;
+
+		cleaned_cnt++;
+		howmany++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size))
+			i = 0;
+
+		rx_queue->next_to_clean = i;
+
+		/* fetch next buffer if not the last in frame */
+		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+			continue;
+
+		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+			count_errors(lstatus, ndev);
+
+			/* discard faulty buffer */
+			dev_kfree_skb(skb);
+			skb = NULL;
+			rx_queue->stats.rx_dropped++;
+			continue;
+		}
+
+		gfar_process_frame(ndev, skb);
+
+		/* Increment the number of packets */
+		total_pkts++;
+		total_bytes += skb->len;
+
+		skb_record_rx_queue(skb, rx_queue->qindex);
+
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		/* Send the packet up the stack */
+		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
+
+		skb = NULL;
+	}
+
+	/* Store incomplete frames for completion */
+	rx_queue->skb = skb;
+
+	rx_queue->stats.rx_packets += total_pkts;
+	rx_queue->stats.rx_bytes += total_bytes;
+
+	if (cleaned_cnt)
+		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+	/* Update Last Free RxBD pointer for LFC */
+	if (unlikely(priv->tx_actual_en)) {
+		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+		gfar_write(rx_queue->rfbptr, bdp_dma);
+	}
+
+	return howmany;
+}
+
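+/* NAPI Rx poll for single-queue groups: clean up to "budget" frames and,
+ * if the budget wasn't exhausted, complete NAPI and re-enable the Rx
+ * interrupts under the group lock.
+ */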
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_rx);
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
+	int work_done = 0;
+
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived
+	 */
+	gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+	work_done = gfar_clean_rx_ring(rx_queue, budget);
+
+	if (work_done < budget) {
+		u32 imask;
+		napi_complete_done(napi, work_done);
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&regs->rstat, gfargrp->rstat);
+
+		spin_lock_irq(&gfargrp->grplock);
+		imask = gfar_read(&regs->imask);
+		imask |= IMASK_RX_DEFAULT;
+		gfar_write(&regs->imask, imask);
+		spin_unlock_irq(&gfargrp->grplock);
+	}
+
+	return work_done;
+}
+
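+/* NAPI Tx poll for single-queue groups: Tx cleanup always runs to
+ * completion, so NAPI is completed unconditionally and the Tx interrupts
+ * are re-enabled before returning.
+ */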
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_tx);
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+	u32 imask;
+
+	/* Clear IEVENT, so the Tx interrupt isn't raised again
+	 * for the transmissions that have already completed
+	 */
+	gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+	/* run Tx cleanup to completion */
+	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+		gfar_clean_tx_ring(tx_queue);
+
+	napi_complete(napi);
+
+	spin_lock_irq(&gfargrp->grplock);
+	imask = gfar_read(&regs->imask);
+	imask |= IMASK_TX_DEFAULT;
+	gfar_write(&regs->imask, imask);
+	spin_unlock_irq(&gfargrp->grplock);
+
+	return 0;
+}
+
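+/* NAPI Rx poll for multi-queue groups: the budget is split evenly among the
+ * Rx queues marked active in RSTAT, and Rx interrupts are re-enabled only
+ * once every active queue has been fully drained.
+ */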
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_rx);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int work_done = 0, work_done_per_q = 0;
+	int i, budget_per_q = 0;
+	unsigned long rstat_rxf;
+	int num_act_queues;
+
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived
+	 */
+	gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
+	if (num_act_queues)
+		budget_per_q = budget/num_act_queues;
+
+	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+		/* skip queue if not active */
+		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+			continue;
+
+		rx_queue = priv->rx_queue[i];
+		work_done_per_q =
+			gfar_clean_rx_ring(rx_queue, budget_per_q);
+		work_done += work_done_per_q;
+
+		/* finished processing this queue */
+		if (work_done_per_q < budget_per_q) {
+			/* clear active queue hw indication */
+			gfar_write(&regs->rstat,
+				   RSTAT_CLEAR_RXF0 >> i);
+			num_act_queues--;
+
+			if (!num_act_queues)
+				break;
+		}
+	}
+
+	if (!num_act_queues) {
+		u32 imask;
+		napi_complete_done(napi, work_done);
+
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&regs->rstat, gfargrp->rstat);
+
+		spin_lock_irq(&gfargrp->grplock);
+		imask = gfar_read(&regs->imask);
+		imask |= IMASK_RX_DEFAULT;
+		gfar_write(&regs->imask, imask);
+		spin_unlock_irq(&gfargrp->grplock);
+	}
+
+	return work_done;
+}
+
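+/* NAPI Tx poll for multi-queue groups: clean every Tx queue of the group to
+ * completion, and re-enable Tx interrupts once a poll pass finds no queue
+ * with pending work.
+ */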
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi_tx);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	int has_tx_work = 0;
+	int i;
+
+	/* Clear IEVENT, so the Tx interrupt isn't raised again
+	 * for the transmissions that have already completed
+	 */
+	gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+		tx_queue = priv->tx_queue[i];
+		/* run Tx cleanup to completion */
+		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+			gfar_clean_tx_ring(tx_queue);
+			has_tx_work = 1;
+		}
+	}
+
+	if (!has_tx_work) {
+		u32 imask;
+		napi_complete(napi);
+
+		spin_lock_irq(&gfargrp->grplock);
+		imask = gfar_read(&regs->imask);
+		imask |= IMASK_TX_DEFAULT;
+		gfar_write(&regs->imask, imask);
+		spin_unlock_irq(&gfargrp->grplock);
+	}
+
+	return 0;
+}
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *grp_id)
+{
+	struct gfar_priv_grp *gfargrp = grp_id;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_private *priv = gfargrp->priv;
+	struct net_device *dev = priv->ndev;
+
+	/* Save ievent for future reference */
+	u32 events = gfar_read(&regs->ievent);
+
+	/* Clear IEVENT */
+	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
+
+	/* Magic Packet is not an error. */
+	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+	    (events & IEVENT_MAG))
+		events &= ~IEVENT_MAG;
+
+	/* Log the error interrupt if error messaging is enabled */
+	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
+		netdev_dbg(dev,
+			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+			   events, gfar_read(&regs->imask));
+
+	/* Update the error counters */
+	if (events & IEVENT_TXE) {
+		dev->stats.tx_errors++;
+
+		if (events & IEVENT_LC)
+			dev->stats.tx_window_errors++;
+		if (events & IEVENT_CRL)
+			dev->stats.tx_aborted_errors++;
+		if (events & IEVENT_XFUN) {
+			netif_dbg(priv, tx_err, dev,
+				  "TX FIFO underrun, packet dropped\n");
+			dev->stats.tx_dropped++;
+			atomic64_inc(&priv->extra_stats.tx_underrun);
+
+			schedule_work(&priv->reset_task);
+		}
+		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
+	}
+	if (events & IEVENT_BSY) {
+		dev->stats.rx_over_errors++;
+		atomic64_inc(&priv->extra_stats.rx_bsy);
+
+		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+			  gfar_read(&regs->rstat));
+	}
+	if (events & IEVENT_BABR) {
+		dev->stats.rx_errors++;
+		atomic64_inc(&priv->extra_stats.rx_babr);
+
+		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
+	}
+	if (events & IEVENT_EBERR) {
+		atomic64_inc(&priv->extra_stats.eberr);
+		netif_dbg(priv, rx_err, dev, "bus error\n");
+	}
+	if (events & IEVENT_RXC)
+		netif_dbg(priv, rx_status, dev, "control frame\n");
+
+	if (events & IEVENT_BABT) {
+		atomic64_inc(&priv->extra_stats.tx_babt);
+		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
+	}
+	return IRQ_HANDLED;
+}
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
+{
+	struct gfar_priv_grp *gfargrp = grp_id;
+
+	/* Save ievent for future reference */
+	u32 events = gfar_read(&gfargrp->regs->ievent);
+
+	/* Check for reception */
+	if (events & IEVENT_RX_MASK)
+		gfar_receive(irq, grp_id);
+
+	/* Check for transmit completion */
+	if (events & IEVENT_TX_MASK)
+		gfar_transmit(irq, grp_id);
+
+	/* Check for errors */
+	if (events & IEVENT_ERR_MASK)
+		gfar_error(irq, grp_id);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int i;
+
+	/* If the device has multiple interrupts, run tx/rx */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		for (i = 0; i < priv->num_grps; i++) {
+			struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+			disable_irq(gfar_irq(grp, TX)->irq);
+			disable_irq(gfar_irq(grp, RX)->irq);
+			disable_irq(gfar_irq(grp, ER)->irq);
+			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+			enable_irq(gfar_irq(grp, ER)->irq);
+			enable_irq(gfar_irq(grp, RX)->irq);
+			enable_irq(gfar_irq(grp, TX)->irq);
+		}
+	} else {
+		for (i = 0; i < priv->num_grps; i++) {
+			struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+			disable_irq(gfar_irq(grp, TX)->irq);
+			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+			enable_irq(gfar_irq(grp, TX)->irq);
+		}
+	}
+}
+#endif
+
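+/* Release the Error, Transmit and Receive IRQ lines of a group */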
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+	free_irq(gfar_irq(grp, TX)->irq, grp);
+	free_irq(gfar_irq(grp, RX)->irq, grp);
+	free_irq(gfar_irq(grp, ER)->irq, grp);
+}
+
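+/* Request the interrupt lines of a group: separate Error/Transmit/Receive
+ * handlers when the device has multiple interrupts, or the combined
+ * gfar_interrupt handler otherwise. The error and receive lines (or the
+ * single combined line) are also marked wakeup-capable via enable_irq_wake().
+ */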
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+	struct gfar_private *priv = grp->priv;
+	struct net_device *dev = priv->ndev;
+	int err;
+
+	/* If the device has multiple interrupts, register for
+	 * them.  Otherwise, only register for the one
+	 */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		/* Install our interrupt handlers for Error,
+		 * Transmit, and Receive
+		 */
+		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+				  gfar_irq(grp, ER)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, ER)->irq);
+
+			goto err_irq_fail;
+		}
+		enable_irq_wake(gfar_irq(grp, ER)->irq);
+
+		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+				  gfar_irq(grp, TX)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, TX)->irq);
+			goto tx_irq_fail;
+		}
+		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+				  gfar_irq(grp, RX)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, RX)->irq);
+			goto rx_irq_fail;
+		}
+		enable_irq_wake(gfar_irq(grp, RX)->irq);
+
+	} else {
+		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+				  gfar_irq(grp, TX)->name, grp);
+		if (err < 0) {
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  gfar_irq(grp, TX)->irq);
+			goto err_irq_fail;
+		}
+		enable_irq_wake(gfar_irq(grp, TX)->irq);
+	}
+
+	return 0;
+
+rx_irq_fail:
+	free_irq(gfar_irq(grp, TX)->irq, grp);
+tx_irq_fail:
+	free_irq(gfar_irq(grp, ER)->irq, grp);
+err_irq_fail:
+	return err;
+
+}
+
+static void gfar_free_irq(struct gfar_private *priv)
+{
+	int i;
+
+	/* Free the IRQs */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		for (i = 0; i < priv->num_grps; i++)
+			free_grp_irqs(&priv->gfargrp[i]);
+	} else {
+		for (i = 0; i < priv->num_grps; i++)
+			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+				 &priv->gfargrp[i]);
+	}
+}
+
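+/* Request the IRQs of every interrupt group, unwinding the groups already
+ * registered if any request fails.
+ */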
+static int gfar_request_irq(struct gfar_private *priv)
+{
+	int err, i, j;
+
+	for (i = 0; i < priv->num_grps; i++) {
+		err = register_grp_irqs(&priv->gfargrp[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				free_grp_irqs(&priv->gfargrp[j]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
+static int gfar_enet_open(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int err;
+
+	err = init_phy(dev);
+	if (err)
+		return err;
+
+	err = gfar_request_irq(priv);
+	if (err)
+		return err;
+
+	err = startup_gfar(dev);
+	if (err)
+		return err;
+
+	return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	cancel_work_sync(&priv->reset_task);
+	stop_gfar(dev);
+
+	/* Disconnect from the PHY */
+	phy_disconnect(dev->phydev);
+
+	gfar_free_irq(priv);
+
+	return 0;
+}
+
+/* Clears each of the exact match registers to zero, so they
+ * don't interfere with normal reception
+ */
+static void gfar_clear_exact_match(struct net_device *dev)
+{
+	int idx;
+	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
+		gfar_set_mac_for_addr(dev, idx, zero_arr);
+}
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to.  Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed).
+ */
+static void gfar_set_multi(struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Set RCTRL to PROM */
+		tempval = gfar_read(&regs->rctrl);
+		tempval |= RCTRL_PROM;
+		gfar_write(&regs->rctrl, tempval);
+	} else {
+		/* Set RCTRL to not PROM */
+		tempval = gfar_read(&regs->rctrl);
+		tempval &= ~(RCTRL_PROM);
+		gfar_write(&regs->rctrl, tempval);
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		/* Set the hash to rx all multicast frames */
+		gfar_write(&regs->igaddr0, 0xffffffff);
+		gfar_write(&regs->igaddr1, 0xffffffff);
+		gfar_write(&regs->igaddr2, 0xffffffff);
+		gfar_write(&regs->igaddr3, 0xffffffff);
+		gfar_write(&regs->igaddr4, 0xffffffff);
+		gfar_write(&regs->igaddr5, 0xffffffff);
+		gfar_write(&regs->igaddr6, 0xffffffff);
+		gfar_write(&regs->igaddr7, 0xffffffff);
+		gfar_write(&regs->gaddr0, 0xffffffff);
+		gfar_write(&regs->gaddr1, 0xffffffff);
+		gfar_write(&regs->gaddr2, 0xffffffff);
+		gfar_write(&regs->gaddr3, 0xffffffff);
+		gfar_write(&regs->gaddr4, 0xffffffff);
+		gfar_write(&regs->gaddr5, 0xffffffff);
+		gfar_write(&regs->gaddr6, 0xffffffff);
+		gfar_write(&regs->gaddr7, 0xffffffff);
+	} else {
+		int em_num;
+		int idx;
+
+		/* zero out the hash */
+		gfar_write(&regs->igaddr0, 0x0);
+		gfar_write(&regs->igaddr1, 0x0);
+		gfar_write(&regs->igaddr2, 0x0);
+		gfar_write(&regs->igaddr3, 0x0);
+		gfar_write(&regs->igaddr4, 0x0);
+		gfar_write(&regs->igaddr5, 0x0);
+		gfar_write(&regs->igaddr6, 0x0);
+		gfar_write(&regs->igaddr7, 0x0);
+		gfar_write(&regs->gaddr0, 0x0);
+		gfar_write(&regs->gaddr1, 0x0);
+		gfar_write(&regs->gaddr2, 0x0);
+		gfar_write(&regs->gaddr3, 0x0);
+		gfar_write(&regs->gaddr4, 0x0);
+		gfar_write(&regs->gaddr5, 0x0);
+		gfar_write(&regs->gaddr6, 0x0);
+		gfar_write(&regs->gaddr7, 0x0);
+
+		/* If we have extended hash tables, we need to
+		 * clear the exact match registers to prepare for
+		 * setting them
+		 */
+		if (priv->extended_hash) {
+			em_num = GFAR_EM_NUM + 1;
+			gfar_clear_exact_match(dev);
+			idx = 1;
+		} else {
+			idx = 0;
+			em_num = 0;
+		}
+
+		if (netdev_mc_empty(dev))
+			return;
+
+		/* Parse the list, and set the appropriate bits */
+		netdev_for_each_mc_addr(ha, dev) {
+			if (idx < em_num) {
+				gfar_set_mac_for_addr(dev, idx, ha->addr);
+				idx++;
+			} else
+				gfar_set_hash_for_addr(dev, ha->addr);
+		}
+	}
+}
+
 void gfar_mac_reset(struct gfar_private *priv)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -1267,45 +3274,23 @@
 		gfar_write_isrg(priv);
 }
 
-static void gfar_init_addr_hash_table(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
-		priv->extended_hash = 1;
-		priv->hash_width = 9;
-
-		priv->hash_regs[0] = &regs->igaddr0;
-		priv->hash_regs[1] = &regs->igaddr1;
-		priv->hash_regs[2] = &regs->igaddr2;
-		priv->hash_regs[3] = &regs->igaddr3;
-		priv->hash_regs[4] = &regs->igaddr4;
-		priv->hash_regs[5] = &regs->igaddr5;
-		priv->hash_regs[6] = &regs->igaddr6;
-		priv->hash_regs[7] = &regs->igaddr7;
-		priv->hash_regs[8] = &regs->gaddr0;
-		priv->hash_regs[9] = &regs->gaddr1;
-		priv->hash_regs[10] = &regs->gaddr2;
-		priv->hash_regs[11] = &regs->gaddr3;
-		priv->hash_regs[12] = &regs->gaddr4;
-		priv->hash_regs[13] = &regs->gaddr5;
-		priv->hash_regs[14] = &regs->gaddr6;
-		priv->hash_regs[15] = &regs->gaddr7;
-
-	} else {
-		priv->extended_hash = 0;
-		priv->hash_width = 8;
-
-		priv->hash_regs[0] = &regs->gaddr0;
-		priv->hash_regs[1] = &regs->gaddr1;
-		priv->hash_regs[2] = &regs->gaddr2;
-		priv->hash_regs[3] = &regs->gaddr3;
-		priv->hash_regs[4] = &regs->gaddr4;
-		priv->hash_regs[5] = &regs->gaddr5;
-		priv->hash_regs[6] = &regs->gaddr6;
-		priv->hash_regs[7] = &regs->gaddr7;
-	}
-}
+static const struct net_device_ops gfar_netdev_ops = {
+	.ndo_open = gfar_enet_open,
+	.ndo_start_xmit = gfar_start_xmit,
+	.ndo_stop = gfar_close,
+	.ndo_change_mtu = gfar_change_mtu,
+	.ndo_set_features = gfar_set_features,
+	.ndo_set_rx_mode = gfar_set_multi,
+	.ndo_tx_timeout = gfar_timeout,
+	.ndo_do_ioctl = gfar_ioctl,
+	.ndo_get_stats = gfar_get_stats,
+	.ndo_change_carrier = fixed_phy_change_carrier,
+	.ndo_set_mac_address = gfar_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = gfar_netpoll,
+#endif
+};
 
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start
@@ -1736,2037 +3721,6 @@
 
 #endif
 
-/* Reads the controller's registers to determine what interface
- * connects it to the PHY.
- */
-static phy_interface_t gfar_get_interface(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 ecntrl;
-
-	ecntrl = gfar_read(&regs->ecntrl);
-
-	if (ecntrl & ECNTRL_SGMII_MODE)
-		return PHY_INTERFACE_MODE_SGMII;
-
-	if (ecntrl & ECNTRL_TBI_MODE) {
-		if (ecntrl & ECNTRL_REDUCED_MODE)
-			return PHY_INTERFACE_MODE_RTBI;
-		else
-			return PHY_INTERFACE_MODE_TBI;
-	}
-
-	if (ecntrl & ECNTRL_REDUCED_MODE) {
-		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
-			return PHY_INTERFACE_MODE_RMII;
-		}
-		else {
-			phy_interface_t interface = priv->interface;
-
-			/* This isn't autodetected right now, so it must
-			 * be set by the device tree or platform code.
-			 */
-			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
-				return PHY_INTERFACE_MODE_RGMII_ID;
-
-			return PHY_INTERFACE_MODE_RGMII;
-		}
-	}
-
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
-		return PHY_INTERFACE_MODE_GMII;
-
-	return PHY_INTERFACE_MODE_MII;
-}
-
-
-/* Initializes driver's PHY state, and attaches to the PHY.
- * Returns 0 on success.
- */
-static int init_phy(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	uint gigabit_support =
-		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-		GFAR_SUPPORTED_GBIT : 0;
-	phy_interface_t interface;
-	struct phy_device *phydev;
-	struct ethtool_eee edata;
-
-	priv->oldlink = 0;
-	priv->oldspeed = 0;
-	priv->oldduplex = -1;
-
-	interface = gfar_get_interface(dev);
-
-	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
-				interface);
-	if (!phydev) {
-		dev_err(&dev->dev, "could not attach to PHY\n");
-		return -ENODEV;
-	}
-
-	if (interface == PHY_INTERFACE_MODE_SGMII)
-		gfar_configure_serdes(dev);
-
-	/* Remove any features not supported by the controller */
-	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
-	phydev->advertising = phydev->supported;
-
-	/* Add support for flow control, but don't advertise it by default */
-	phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-
-	/* disable EEE autoneg, EEE not supported by eTSEC */
-	memset(&edata, 0, sizeof(struct ethtool_eee));
-	phy_ethtool_set_eee(phydev, &edata);
-
-	return 0;
-}
-
-/* Initialize TBI PHY interface for communicating with the
- * SERDES lynx PHY on the chip.  We communicate with this PHY
- * through the MDIO bus on each controller, treating it as a
- * "normal" PHY at the address found in the TBIPA register.  We assume
- * that the TBIPA register is valid.  Either the MDIO bus code will set
- * it to a value that doesn't conflict with other PHYs on the bus, or the
- * value doesn't matter, as there are no other PHYs on the bus.
- */
-static void gfar_configure_serdes(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *tbiphy;
-
-	if (!priv->tbi_node) {
-		dev_warn(&dev->dev, "error: SGMII mode requires that the "
-				    "device tree specify a tbi-handle\n");
-		return;
-	}
-
-	tbiphy = of_phy_find_device(priv->tbi_node);
-	if (!tbiphy) {
-		dev_err(&dev->dev, "error: Could not get TBI device\n");
-		return;
-	}
-
-	/* If the link is already up, we must already be ok, and don't need to
-	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
-	 * everything for us?  Resetting it takes the link down and requires
-	 * several seconds for it to come back.
-	 */
-	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
-		put_device(&tbiphy->mdio.dev);
-		return;
-	}
-
-	/* Single clk mode, mii mode off(for serdes communication) */
-	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
-
-	phy_write(tbiphy, MII_ADVERTISE,
-		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
-		  ADVERTISE_1000XPSE_ASYM);
-
-	phy_write(tbiphy, MII_BMCR,
-		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
-		  BMCR_SPEED1000);
-
-	put_device(&tbiphy->mdio.dev);
-}
-
-static int __gfar_is_rx_idle(struct gfar_private *priv)
-{
-	u32 res;
-
-	/* Normaly TSEC should not hang on GRS commands, so we should
-	 * actually wait for IEVENT_GRSC flag.
-	 */
-	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
-		return 0;
-
-	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
-	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
-	 * and the Rx can be safely reset.
-	 */
-	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
-	res &= 0x7f807f80;
-	if ((res & 0xffff) == (res >> 16))
-		return 1;
-
-	return 0;
-}
-
-/* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-	unsigned int timeout;
-	int stopped;
-
-	gfar_ints_disable(priv);
-
-	if (gfar_is_dma_stopped(priv))
-		return;
-
-	/* Stop the DMA, and wait for it to stop */
-	tempval = gfar_read(&regs->dmactrl);
-	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&regs->dmactrl, tempval);
-
-retry:
-	timeout = 1000;
-	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
-		cpu_relax();
-		timeout--;
-	}
-
-	if (!timeout)
-		stopped = gfar_is_dma_stopped(priv);
-
-	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
-	    !__gfar_is_rx_idle(priv))
-		goto retry;
-}
-
-/* Halt the receive and transmit queues */
-void gfar_halt(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-
-	/* Dissable the Rx/Tx hw queues */
-	gfar_write(&regs->rqueue, 0);
-	gfar_write(&regs->tqueue, 0);
-
-	mdelay(10);
-
-	gfar_halt_nodisable(priv);
-
-	/* Disable Rx/Tx DMA */
-	tempval = gfar_read(&regs->maccfg1);
-	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
-	gfar_write(&regs->maccfg1, tempval);
-}
-
-void stop_gfar(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	netif_tx_stop_all_queues(dev);
-
-	smp_mb__before_atomic();
-	set_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_atomic();
-
-	disable_napi(priv);
-
-	/* disable ints and gracefully shut down Rx/Tx DMA */
-	gfar_halt(priv);
-
-	phy_stop(dev->phydev);
-
-	free_skb_resources(priv);
-}
-
-static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
-{
-	struct txbd8 *txbdp;
-	struct gfar_private *priv = netdev_priv(tx_queue->dev);
-	int i, j;
-
-	txbdp = tx_queue->tx_bd_base;
-
-	for (i = 0; i < tx_queue->tx_ring_size; i++) {
-		if (!tx_queue->tx_skbuff[i])
-			continue;
-
-		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
-				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
-		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
-		     j++) {
-			txbdp++;
-			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
-				       be16_to_cpu(txbdp->length),
-				       DMA_TO_DEVICE);
-		}
-		txbdp++;
-		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
-		tx_queue->tx_skbuff[i] = NULL;
-	}
-	kfree(tx_queue->tx_skbuff);
-	tx_queue->tx_skbuff = NULL;
-}
-
-static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
-{
-	int i;
-
-	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
-
-	if (rx_queue->skb)
-		dev_kfree_skb(rx_queue->skb);
-
-	for (i = 0; i < rx_queue->rx_ring_size; i++) {
-		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
-
-		rxbdp->lstatus = 0;
-		rxbdp->bufPtr = 0;
-		rxbdp++;
-
-		if (!rxb->page)
-			continue;
-
-		dma_unmap_page(rx_queue->dev, rxb->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_page(rxb->page);
-
-		rxb->page = NULL;
-	}
-
-	kfree(rx_queue->rx_buff);
-	rx_queue->rx_buff = NULL;
-}
-
-/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff
- */
-static void free_skb_resources(struct gfar_private *priv)
-{
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	int i;
-
-	/* Go through all the buffer descriptors and free their data buffers */
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		struct netdev_queue *txq;
-
-		tx_queue = priv->tx_queue[i];
-		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
-		if (tx_queue->tx_skbuff)
-			free_skb_tx_queue(tx_queue);
-		netdev_tx_reset_queue(txq);
-	}
-
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-		if (rx_queue->rx_buff)
-			free_skb_rx_queue(rx_queue);
-	}
-
-	dma_free_coherent(priv->dev,
-			  sizeof(struct txbd8) * priv->total_tx_ring_size +
-			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
-			  priv->tx_queue[0]->tx_bd_base,
-			  priv->tx_queue[0]->tx_bd_dma_base);
-}
-
-void gfar_start(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-	int i = 0;
-
-	/* Enable Rx/Tx hw queues */
-	gfar_write(&regs->rqueue, priv->rqueue);
-	gfar_write(&regs->tqueue, priv->tqueue);
-
-	/* Initialize DMACTRL to have WWR and WOP */
-	tempval = gfar_read(&regs->dmactrl);
-	tempval |= DMACTRL_INIT_SETTINGS;
-	gfar_write(&regs->dmactrl, tempval);
-
-	/* Make sure we aren't stopped */
-	tempval = gfar_read(&regs->dmactrl);
-	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&regs->dmactrl, tempval);
-
-	for (i = 0; i < priv->num_grps; i++) {
-		regs = priv->gfargrp[i].regs;
-		/* Clear THLT/RHLT, so that the DMA starts polling now */
-		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
-		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
-	}
-
-	/* Enable Rx/Tx DMA */
-	tempval = gfar_read(&regs->maccfg1);
-	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
-	gfar_write(&regs->maccfg1, tempval);
-
-	gfar_ints_enable(priv);
-
-	netif_trans_update(priv->ndev); /* prevent tx timeout */
-}
-
-static void free_grp_irqs(struct gfar_priv_grp *grp)
-{
-	free_irq(gfar_irq(grp, TX)->irq, grp);
-	free_irq(gfar_irq(grp, RX)->irq, grp);
-	free_irq(gfar_irq(grp, ER)->irq, grp);
-}
-
-static int register_grp_irqs(struct gfar_priv_grp *grp)
-{
-	struct gfar_private *priv = grp->priv;
-	struct net_device *dev = priv->ndev;
-	int err;
-
-	/* If the device has multiple interrupts, register for
-	 * them.  Otherwise, only register for the one
-	 */
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		/* Install our interrupt handlers for Error,
-		 * Transmit, and Receive
-		 */
-		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
-				  gfar_irq(grp, ER)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, ER)->irq);
-
-			goto err_irq_fail;
-		}
-		enable_irq_wake(gfar_irq(grp, ER)->irq);
-
-		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
-				  gfar_irq(grp, TX)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, TX)->irq);
-			goto tx_irq_fail;
-		}
-		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
-				  gfar_irq(grp, RX)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, RX)->irq);
-			goto rx_irq_fail;
-		}
-		enable_irq_wake(gfar_irq(grp, RX)->irq);
-
-	} else {
-		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
-				  gfar_irq(grp, TX)->name, grp);
-		if (err < 0) {
-			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
-				  gfar_irq(grp, TX)->irq);
-			goto err_irq_fail;
-		}
-		enable_irq_wake(gfar_irq(grp, TX)->irq);
-	}
-
-	return 0;
-
-rx_irq_fail:
-	free_irq(gfar_irq(grp, TX)->irq, grp);
-tx_irq_fail:
-	free_irq(gfar_irq(grp, ER)->irq, grp);
-err_irq_fail:
-	return err;
-
-}
-
-static void gfar_free_irq(struct gfar_private *priv)
-{
-	int i;
-
-	/* Free the IRQs */
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		for (i = 0; i < priv->num_grps; i++)
-			free_grp_irqs(&priv->gfargrp[i]);
-	} else {
-		for (i = 0; i < priv->num_grps; i++)
-			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
-				 &priv->gfargrp[i]);
-	}
-}
-
-static int gfar_request_irq(struct gfar_private *priv)
-{
-	int err, i, j;
-
-	for (i = 0; i < priv->num_grps; i++) {
-		err = register_grp_irqs(&priv->gfargrp[i]);
-		if (err) {
-			for (j = 0; j < i; j++)
-				free_grp_irqs(&priv->gfargrp[j]);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	int err;
-
-	gfar_mac_reset(priv);
-
-	err = gfar_alloc_skb_resources(ndev);
-	if (err)
-		return err;
-
-	gfar_init_tx_rx_base(priv);
-
-	smp_mb__before_atomic();
-	clear_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_atomic();
-
-	/* Start Rx/Tx DMA and enable the interrupts */
-	gfar_start(priv);
-
-	/* force link state update after mac reset */
-	priv->oldlink = 0;
-	priv->oldspeed = 0;
-	priv->oldduplex = -1;
-
-	phy_start(ndev->phydev);
-
-	enable_napi(priv);
-
-	netif_tx_wake_all_queues(ndev);
-
-	return 0;
-}
-
-/* Called when something needs to use the ethernet device
- * Returns 0 for success.
- */
-static int gfar_enet_open(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	int err;
-
-	err = init_phy(dev);
-	if (err)
-		return err;
-
-	err = gfar_request_irq(priv);
-	if (err)
-		return err;
-
-	err = startup_gfar(dev);
-	if (err)
-		return err;
-
-	return err;
-}
-
-static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
-{
-	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
-
-	memset(fcb, 0, GMAC_FCB_LEN);
-
-	return fcb;
-}
-
-static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
-				    int fcb_length)
-{
-	/* If we're here, it's a IP packet with a TCP or UDP
-	 * payload.  We set it to checksum, using a pseudo-header
-	 * we provide
-	 */
-	u8 flags = TXFCB_DEFAULT;
-
-	/* Tell the controller what the protocol is
-	 * And provide the already calculated phcs
-	 */
-	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
-		flags |= TXFCB_UDP;
-		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
-	} else
-		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
-
-	/* l3os is the distance between the start of the
-	 * frame (skb->data) and the start of the IP hdr.
-	 * l4os is the distance between the start of the
-	 * l3 hdr and the l4 hdr
-	 */
-	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
-	fcb->l4os = skb_network_header_len(skb);
-
-	fcb->flags = flags;
-}
-
-static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
-{
-	fcb->flags |= TXFCB_VLN;
-	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
-}
-
-static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
-				      struct txbd8 *base, int ring_size)
-{
-	struct txbd8 *new_bd = bdp + stride;
-
-	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
-}
-
-static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
-				      int ring_size)
-{
-	return skip_txbd(bdp, 1, base, ring_size);
-}
-
-/* eTSEC12: csum generation not supported for some fcb offsets */
-static inline bool gfar_csum_errata_12(struct gfar_private *priv,
-				       unsigned long fcb_addr)
-{
-	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
-	       (fcb_addr % 0x20) > 0x18);
-}
-
-/* eTSEC76: csum generation for frames larger than 2500 may
- * cause excess delays before start of transmission
- */
-static inline bool gfar_csum_errata_76(struct gfar_private *priv,
-				       unsigned int len)
-{
-	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
-	       (len > 2500));
-}
-
-/* This is called by the kernel when a frame is ready for transmission.
- * It is pointed to by the dev->hard_start_xmit function pointer
- */
-static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct netdev_queue *txq;
-	struct gfar __iomem *regs = NULL;
-	struct txfcb *fcb = NULL;
-	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
-	u32 lstatus;
-	skb_frag_t *frag;
-	int i, rq = 0;
-	int do_tstamp, do_csum, do_vlan;
-	u32 bufaddr;
-	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
-
-	rq = skb->queue_mapping;
-	tx_queue = priv->tx_queue[rq];
-	txq = netdev_get_tx_queue(dev, rq);
-	base = tx_queue->tx_bd_base;
-	regs = tx_queue->grp->regs;
-
-	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
-	do_vlan = skb_vlan_tag_present(skb);
-	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-		    priv->hwts_tx_en;
-
-	if (do_csum || do_vlan)
-		fcb_len = GMAC_FCB_LEN;
-
-	/* check if time stamp should be generated */
-	if (unlikely(do_tstamp))
-		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-
-	/* make space for additional header when fcb is needed */
-	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
-		struct sk_buff *skb_new;
-
-		skb_new = skb_realloc_headroom(skb, fcb_len);
-		if (!skb_new) {
-			dev->stats.tx_errors++;
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-
-		if (skb->sk)
-			skb_set_owner_w(skb_new, skb->sk);
-		dev_consume_skb_any(skb);
-		skb = skb_new;
-	}
-
-	/* total number of fragments in the SKB */
-	nr_frags = skb_shinfo(skb)->nr_frags;
-
-	/* calculate the required number of TxBDs for this skb */
-	if (unlikely(do_tstamp))
-		nr_txbds = nr_frags + 2;
-	else
-		nr_txbds = nr_frags + 1;
-
-	/* check if there is space to queue this packet */
-	if (nr_txbds > tx_queue->num_txbdfree) {
-		/* no space, stop the queue */
-		netif_tx_stop_queue(txq);
-		dev->stats.tx_fifo_errors++;
-		return NETDEV_TX_BUSY;
-	}
-
-	/* Update transmit stats */
-	bytes_sent = skb->len;
-	tx_queue->stats.tx_bytes += bytes_sent;
-	/* keep Tx bytes on wire for BQL accounting */
-	GFAR_CB(skb)->bytes_sent = bytes_sent;
-	tx_queue->stats.tx_packets++;
-
-	txbdp = txbdp_start = tx_queue->cur_tx;
-	lstatus = be32_to_cpu(txbdp->lstatus);
-
-	/* Add TxPAL between FCB and frame if required */
-	if (unlikely(do_tstamp)) {
-		skb_push(skb, GMAC_TXPAL_LEN);
-		memset(skb->data, 0, GMAC_TXPAL_LEN);
-	}
-
-	/* Add TxFCB if required */
-	if (fcb_len) {
-		fcb = gfar_add_fcb(skb);
-		lstatus |= BD_LFLAG(TXBD_TOE);
-	}
-
-	/* Set up checksumming */
-	if (do_csum) {
-		gfar_tx_checksum(skb, fcb, fcb_len);
-
-		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
-		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
-			__skb_pull(skb, GMAC_FCB_LEN);
-			skb_checksum_help(skb);
-			if (do_vlan || do_tstamp) {
-				/* put back a new fcb for vlan/tstamp TOE */
-				fcb = gfar_add_fcb(skb);
-			} else {
-				/* Tx TOE not used */
-				lstatus &= ~(BD_LFLAG(TXBD_TOE));
-				fcb = NULL;
-			}
-		}
-	}
-
-	if (do_vlan)
-		gfar_tx_vlan(skb, fcb);
-
-	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
-				 DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-		goto dma_map_err;
-
-	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
-
-	/* Time stamp insertion requires one additional TxBD */
-	if (unlikely(do_tstamp))
-		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
-						 tx_queue->tx_ring_size);
-
-	if (likely(!nr_frags)) {
-		if (likely(!do_tstamp))
-			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-	} else {
-		u32 lstatus_start = lstatus;
-
-		/* Place the fragment addresses and lengths into the TxBDs */
-		frag = &skb_shinfo(skb)->frags[0];
-		for (i = 0; i < nr_frags; i++, frag++) {
-			unsigned int size;
-
-			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-
-			size = skb_frag_size(frag);
-
-			lstatus = be32_to_cpu(txbdp->lstatus) | size |
-				  BD_LFLAG(TXBD_READY);
-
-			/* Handle the last BD specially */
-			if (i == nr_frags - 1)
-				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-
-			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
-						   size, DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-				goto dma_map_err;
-
-			/* set the TxBD length and buffer pointer */
-			txbdp->bufPtr = cpu_to_be32(bufaddr);
-			txbdp->lstatus = cpu_to_be32(lstatus);
-		}
-
-		lstatus = lstatus_start;
-	}
-
-	/* If time stamping is requested one additional TxBD must be set up. The
-	 * first TxBD points to the FCB and must have a data length of
-	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
-	 * the full frame length.
-	 */
-	if (unlikely(do_tstamp)) {
-		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
-
-		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
-		bufaddr += fcb_len;
-
-		lstatus_ts |= BD_LFLAG(TXBD_READY) |
-			      (skb_headlen(skb) - fcb_len);
-		if (!nr_frags)
-			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-
-		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
-		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
-		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
-
-		/* Setup tx hardware time stamping */
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		fcb->ptp = 1;
-	} else {
-		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
-	}
-
-	netdev_tx_sent_queue(txq, bytes_sent);
-
-	gfar_wmb();
-
-	txbdp_start->lstatus = cpu_to_be32(lstatus);
-
-	gfar_wmb(); /* force lstatus write before tx_skbuff */
-
-	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
-
-	/* Update the current skb pointer to the next entry we will use
-	 * (wrapping if necessary)
-	 */
-	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
-			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
-
-	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-
-	/* We can work in parallel with gfar_clean_tx_ring(), except
-	 * when modifying num_txbdfree. Note that we didn't grab the lock
-	 * when we were reading the num_txbdfree and checking for available
-	 * space, that's because outside of this function it can only grow.
-	 */
-	spin_lock_bh(&tx_queue->txlock);
-	/* reduce TxBD free count */
-	tx_queue->num_txbdfree -= (nr_txbds);
-	spin_unlock_bh(&tx_queue->txlock);
-
-	/* If the next BD still needs to be cleaned up, then the bds
-	 * are full.  We need to tell the kernel to stop sending us stuff.
-	 */
-	if (!tx_queue->num_txbdfree) {
-		netif_tx_stop_queue(txq);
-
-		dev->stats.tx_fifo_errors++;
-	}
-
-	/* Tell the DMA to go go go */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
-
-	return NETDEV_TX_OK;
-
-dma_map_err:
-	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
-	if (do_tstamp)
-		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-	for (i = 0; i < nr_frags; i++) {
-		lstatus = be32_to_cpu(txbdp->lstatus);
-		if (!(lstatus & BD_LFLAG(TXBD_READY)))
-			break;
-
-		lstatus &= ~BD_LFLAG(TXBD_READY);
-		txbdp->lstatus = cpu_to_be32(lstatus);
-		bufaddr = be32_to_cpu(txbdp->bufPtr);
-		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
-			       DMA_TO_DEVICE);
-		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-	}
-	gfar_wmb();
-	dev_kfree_skb_any(skb);
-	return NETDEV_TX_OK;
-}
-
-/* Stops the kernel queue, and halts the controller */
-static int gfar_close(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	cancel_work_sync(&priv->reset_task);
-	stop_gfar(dev);
-
-	/* Disconnect from the PHY */
-	phy_disconnect(dev->phydev);
-
-	gfar_free_irq(priv);
-
-	return 0;
-}
-
-/* Changes the mac address if the controller is not running. */
-static int gfar_set_mac_address(struct net_device *dev)
-{
-	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
-
-	return 0;
-}
-
-static int gfar_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
-		cpu_relax();
-
-	if (dev->flags & IFF_UP)
-		stop_gfar(dev);
-
-	dev->mtu = new_mtu;
-
-	if (dev->flags & IFF_UP)
-		startup_gfar(dev);
-
-	clear_bit_unlock(GFAR_RESETTING, &priv->state);
-
-	return 0;
-}
-
-void reset_gfar(struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-
-	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
-		cpu_relax();
-
-	stop_gfar(ndev);
-	startup_gfar(ndev);
-
-	clear_bit_unlock(GFAR_RESETTING, &priv->state);
-}
-
-/* gfar_reset_task gets scheduled when a packet has not been
- * transmitted after a set amount of time.
- * For now, assume that clearing out all the structures, and
- * starting over will fix the problem.
- */
-static void gfar_reset_task(struct work_struct *work)
-{
-	struct gfar_private *priv = container_of(work, struct gfar_private,
-						 reset_task);
-	reset_gfar(priv->ndev);
-}
-
-static void gfar_timeout(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-
-	dev->stats.tx_errors++;
-	schedule_work(&priv->reset_task);
-}
-
-/* Interrupt Handler for Transmit complete */
-static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
-{
-	struct net_device *dev = tx_queue->dev;
-	struct netdev_queue *txq;
-	struct gfar_private *priv = netdev_priv(dev);
-	struct txbd8 *bdp, *next = NULL;
-	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = tx_queue->tx_bd_base;
-	struct sk_buff *skb;
-	int skb_dirtytx;
-	int tx_ring_size = tx_queue->tx_ring_size;
-	int frags = 0, nr_txbds = 0;
-	int i;
-	int howmany = 0;
-	int tqi = tx_queue->qindex;
-	unsigned int bytes_sent = 0;
-	u32 lstatus;
-	size_t buflen;
-
-	txq = netdev_get_tx_queue(dev, tqi);
-	bdp = tx_queue->dirty_tx;
-	skb_dirtytx = tx_queue->skb_dirtytx;
-
-	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
-
-		frags = skb_shinfo(skb)->nr_frags;
-
-		/* When time stamping, one additional TxBD must be freed.
-		 * Also, we need to dma_unmap_single() the TxPAL.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
-			nr_txbds = frags + 2;
-		else
-			nr_txbds = frags + 1;
-
-		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
-
-		lstatus = be32_to_cpu(lbdp->lstatus);
-
-		/* Only clean completed frames */
-		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
-		    (lstatus & BD_LENGTH_MASK))
-			break;
-
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
-			next = next_txbd(bdp, base, tx_ring_size);
-			buflen = be16_to_cpu(next->length) +
-				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-		} else
-			buflen = be16_to_cpu(bdp->length);
-
-		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-				 buflen, DMA_TO_DEVICE);
-
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
-					  ~0x7UL);
-
-			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
-			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
-			skb_tstamp_tx(skb, &shhwtstamps);
-			gfar_clear_txbd_status(bdp);
-			bdp = next;
-		}
-
-		gfar_clear_txbd_status(bdp);
-		bdp = next_txbd(bdp, base, tx_ring_size);
-
-		for (i = 0; i < frags; i++) {
-			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
-				       be16_to_cpu(bdp->length),
-				       DMA_TO_DEVICE);
-			gfar_clear_txbd_status(bdp);
-			bdp = next_txbd(bdp, base, tx_ring_size);
-		}
-
-		bytes_sent += GFAR_CB(skb)->bytes_sent;
-
-		dev_kfree_skb_any(skb);
-
-		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
-
-		skb_dirtytx = (skb_dirtytx + 1) &
-			      TX_RING_MOD_MASK(tx_ring_size);
-
-		howmany++;
-		spin_lock(&tx_queue->txlock);
-		tx_queue->num_txbdfree += nr_txbds;
-		spin_unlock(&tx_queue->txlock);
-	}
-
-	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (tx_queue->num_txbdfree &&
-	    netif_tx_queue_stopped(txq) &&
-	    !(test_bit(GFAR_DOWN, &priv->state)))
-		netif_wake_subqueue(priv->ndev, tqi);
-
-	/* Update dirty indicators */
-	tx_queue->skb_dirtytx = skb_dirtytx;
-	tx_queue->dirty_tx = bdp;
-
-	netdev_tx_completed_queue(txq, howmany, bytes_sent);
-}
-
-static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
-{
-	struct page *page;
-	dma_addr_t addr;
-
-	page = dev_alloc_page();
-	if (unlikely(!page))
-		return false;
-
-	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
-		__free_page(page);
-
-		return false;
-	}
-
-	rxb->dma = addr;
-	rxb->page = page;
-	rxb->page_offset = 0;
-
-	return true;
-}
-
-static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
-{
-	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
-	struct gfar_extra_stats *estats = &priv->extra_stats;
-
-	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
-	atomic64_inc(&estats->rx_alloc_err);
-}
-
-static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
-				int alloc_cnt)
-{
-	struct rxbd8 *bdp;
-	struct gfar_rx_buff *rxb;
-	int i;
-
-	i = rx_queue->next_to_use;
-	bdp = &rx_queue->rx_bd_base[i];
-	rxb = &rx_queue->rx_buff[i];
-
-	while (alloc_cnt--) {
-		/* try reuse page */
-		if (unlikely(!rxb->page)) {
-			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
-				gfar_rx_alloc_err(rx_queue);
-				break;
-			}
-		}
-
-		/* Setup the new RxBD */
-		gfar_init_rxbdp(rx_queue, bdp,
-				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
-
-		/* Update to the next pointer */
-		bdp++;
-		rxb++;
-
-		if (unlikely(++i == rx_queue->rx_ring_size)) {
-			i = 0;
-			bdp = rx_queue->rx_bd_base;
-			rxb = rx_queue->rx_buff;
-		}
-	}
-
-	rx_queue->next_to_use = i;
-	rx_queue->next_to_alloc = i;
-}
-
-static void count_errors(u32 lstatus, struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &ndev->stats;
-	struct gfar_extra_stats *estats = &priv->extra_stats;
-
-	/* If the packet was truncated, none of the other errors matter */
-	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
-		stats->rx_length_errors++;
-
-		atomic64_inc(&estats->rx_trunc);
-
-		return;
-	}
-	/* Count the errors, if there were any */
-	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
-		stats->rx_length_errors++;
-
-		if (lstatus & BD_LFLAG(RXBD_LARGE))
-			atomic64_inc(&estats->rx_large);
-		else
-			atomic64_inc(&estats->rx_short);
-	}
-	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
-		stats->rx_frame_errors++;
-		atomic64_inc(&estats->rx_nonoctet);
-	}
-	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
-		atomic64_inc(&estats->rx_crcerr);
-		stats->rx_crc_errors++;
-	}
-	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
-		atomic64_inc(&estats->rx_overrun);
-		stats->rx_over_errors++;
-	}
-}
-
-irqreturn_t gfar_receive(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
-	unsigned long flags;
-	u32 imask, ievent;
-
-	ievent = gfar_read(&grp->regs->ievent);
-
-	if (unlikely(ievent & IEVENT_FGPI)) {
-		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
-		return IRQ_HANDLED;
-	}
-
-	if (likely(napi_schedule_prep(&grp->napi_rx))) {
-		spin_lock_irqsave(&grp->grplock, flags);
-		imask = gfar_read(&grp->regs->imask);
-		imask &= IMASK_RX_DISABLED;
-		gfar_write(&grp->regs->imask, imask);
-		spin_unlock_irqrestore(&grp->grplock, flags);
-		__napi_schedule(&grp->napi_rx);
-	} else {
-		/* Clear IEVENT, so interrupts aren't called again
-		 * because of the packets that have already arrived.
-		 */
-		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
-	}
-
-	return IRQ_HANDLED;
-}
-
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
-	unsigned long flags;
-	u32 imask;
-
-	if (likely(napi_schedule_prep(&grp->napi_tx))) {
-		spin_lock_irqsave(&grp->grplock, flags);
-		imask = gfar_read(&grp->regs->imask);
-		imask &= IMASK_TX_DISABLED;
-		gfar_write(&grp->regs->imask, imask);
-		spin_unlock_irqrestore(&grp->grplock, flags);
-		__napi_schedule(&grp->napi_tx);
-	} else {
-		/* Clear IEVENT, so interrupts aren't called again
-		 * because of the packets that have already arrived.
-		 */
-		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
-			     struct sk_buff *skb, bool first)
-{
-	int size = lstatus & BD_LENGTH_MASK;
-	struct page *page = rxb->page;
-
-	if (likely(first)) {
-		skb_put(skb, size);
-	} else {
-		/* the last fragments' length contains the full frame length */
-		if (lstatus & BD_LFLAG(RXBD_LAST))
-			size -= skb->len;
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-				rxb->page_offset + RXBUF_ALIGNMENT,
-				size, GFAR_RXB_TRUESIZE);
-	}
-
-	/* try reuse page */
-	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
-		return false;
-
-	/* change offset to the other half */
-	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
-
-	page_ref_inc(page);
-
-	return true;
-}
-
-static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
-			       struct gfar_rx_buff *old_rxb)
-{
-	struct gfar_rx_buff *new_rxb;
-	u16 nta = rxq->next_to_alloc;
-
-	new_rxb = &rxq->rx_buff[nta];
-
-	/* find next buf that can reuse a page */
-	nta++;
-	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
-
-	/* copy page reference */
-	*new_rxb = *old_rxb;
-
-	/* sync for use by the device */
-	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
-					 old_rxb->page_offset,
-					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
-}
-
-static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
-					    u32 lstatus, struct sk_buff *skb)
-{
-	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
-	struct page *page = rxb->page;
-	bool first = false;
-
-	if (likely(!skb)) {
-		void *buff_addr = page_address(page) + rxb->page_offset;
-
-		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
-		if (unlikely(!skb)) {
-			gfar_rx_alloc_err(rx_queue);
-			return NULL;
-		}
-		skb_reserve(skb, RXBUF_ALIGNMENT);
-		first = true;
-	}
-
-	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
-				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
-
-	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
-		/* reuse the free half of the page */
-		gfar_reuse_rx_page(rx_queue, rxb);
-	} else {
-		/* page cannot be reused, unmap it */
-		dma_unmap_page(rx_queue->dev, rxb->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
-	/* clear rxb content */
-	rxb->page = NULL;
-
-	return skb;
-}
-
-static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
-{
-	/* If valid headers were found, and valid sums
-	 * were verified, then we tell the kernel that no
-	 * checksumming is necessary.  Otherwise, it is [FIXME]
-	 */
-	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
-	    (RXFCB_CIP | RXFCB_CTU))
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	else
-		skb_checksum_none_assert(skb);
-}
-
-/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct rxfcb *fcb = NULL;
-
-	/* fcb is at the beginning if exists */
-	fcb = (struct rxfcb *)skb->data;
-
-	/* Remove the FCB from the skb
-	 * Remove the padded bytes, if there are any
-	 */
-	if (priv->uses_rxfcb)
-		skb_pull(skb, GMAC_FCB_LEN);
-
-	/* Get receive timestamp from the skb */
-	if (priv->hwts_rx_en) {
-		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-		u64 *ns = (u64 *) skb->data;
-
-		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
-	}
-
-	if (priv->padding)
-		skb_pull(skb, priv->padding);
-
-	/* Trim off the FCS */
-	pskb_trim(skb, skb->len - ETH_FCS_LEN);
-
-	if (ndev->features & NETIF_F_RXCSUM)
-		gfar_rx_checksum(skb, fcb);
-
-	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
-	 * Even if vlan rx accel is disabled, on some chips
-	 * RXFCB_VLN is pseudo randomly set.
-	 */
-	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-				       be16_to_cpu(fcb->vlctl));
-}
-
-/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
- * until the budget/quota has been reached. Returns the number
- * of frames handled
- */
-int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
-{
-	struct net_device *ndev = rx_queue->ndev;
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct rxbd8 *bdp;
-	int i, howmany = 0;
-	struct sk_buff *skb = rx_queue->skb;
-	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
-	unsigned int total_bytes = 0, total_pkts = 0;
-
-	/* Get the first full descriptor */
-	i = rx_queue->next_to_clean;
-
-	while (rx_work_limit--) {
-		u32 lstatus;
-
-		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
-			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
-			cleaned_cnt = 0;
-		}
-
-		bdp = &rx_queue->rx_bd_base[i];
-		lstatus = be32_to_cpu(bdp->lstatus);
-		if (lstatus & BD_LFLAG(RXBD_EMPTY))
-			break;
-
-		/* order rx buffer descriptor reads */
-		rmb();
-
-		/* fetch next to clean buffer from the ring */
-		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
-		if (unlikely(!skb))
-			break;
-
-		cleaned_cnt++;
-		howmany++;
-
-		if (unlikely(++i == rx_queue->rx_ring_size))
-			i = 0;
-
-		rx_queue->next_to_clean = i;
-
-		/* fetch next buffer if not the last in frame */
-		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
-			continue;
-
-		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
-			count_errors(lstatus, ndev);
-
-			/* discard faulty buffer */
-			dev_kfree_skb(skb);
-			skb = NULL;
-			rx_queue->stats.rx_dropped++;
-			continue;
-		}
-
-		gfar_process_frame(ndev, skb);
-
-		/* Increment the number of packets */
-		total_pkts++;
-		total_bytes += skb->len;
-
-		skb_record_rx_queue(skb, rx_queue->qindex);
-
-		skb->protocol = eth_type_trans(skb, ndev);
-
-		/* Send the packet up the stack */
-		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
-
-		skb = NULL;
-	}
-
-	/* Store incomplete frames for completion */
-	rx_queue->skb = skb;
-
-	rx_queue->stats.rx_packets += total_pkts;
-	rx_queue->stats.rx_bytes += total_bytes;
-
-	if (cleaned_cnt)
-		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
-
-	/* Update Last Free RxBD pointer for LFC */
-	if (unlikely(priv->tx_actual_en)) {
-		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
-
-		gfar_write(rx_queue->rfbptr, bdp_dma);
-	}
-
-	return howmany;
-}
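
gfar_clean_rx_ring() above is a budget-bounded walk of the RX descriptor ring: stop at the first empty (hardware-owned) descriptor, wrap the index at the ring size, and replenish buffers in batches. The sketch below reproduces only that index/budget bookkeeping with a toy descriptor array; the names and the EMPTY flag value are made up, not the driver's.

    #include <stdio.h>

    #define RING_SIZE  8
    #define DESC_EMPTY 0x1          /* toy stand-in for BD_LFLAG(RXBD_EMPTY) */

    struct toy_desc { unsigned int lstatus; };

    /* Consume at most 'budget' filled descriptors starting at *next_to_clean,
     * wrapping at RING_SIZE and stopping at the first empty descriptor --
     * the same loop shape as gfar_clean_rx_ring().
     */
    static int toy_clean_ring(struct toy_desc *ring, int *next_to_clean,
                              int budget)
    {
            int i = *next_to_clean;
            int howmany = 0;

            while (budget--) {
                    if (ring[i].lstatus & DESC_EMPTY)
                            break;                  /* hardware owns it */

                    ring[i].lstatus |= DESC_EMPTY;  /* hand it back */
                    howmany++;

                    if (++i == RING_SIZE)           /* wrap like the driver */
                            i = 0;
            }

            *next_to_clean = i;
            return howmany;
    }

    int main(void)
    {
            struct toy_desc ring[RING_SIZE] = {
                    {0}, {0}, {0}, {DESC_EMPTY}, {DESC_EMPTY},
                    {DESC_EMPTY}, {DESC_EMPTY}, {DESC_EMPTY},
            };
            int next = 0;

            printf("cleaned %d descriptors, next_to_clean=%d\n",
                   toy_clean_ring(ring, &next, 16), next);
            return 0;
    }
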
-
-static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_rx);
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
-	int work_done = 0;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_RX_MASK);
-
-	work_done = gfar_clean_rx_ring(rx_queue, budget);
-
-	if (work_done < budget) {
-		u32 imask;
-		napi_complete_done(napi, work_done);
-		/* Clear the halt bit in RSTAT */
-		gfar_write(&regs->rstat, gfargrp->rstat);
-
-		spin_lock_irq(&gfargrp->grplock);
-		imask = gfar_read(&regs->imask);
-		imask |= IMASK_RX_DEFAULT;
-		gfar_write(&regs->imask, imask);
-		spin_unlock_irq(&gfargrp->grplock);
-	}
-
-	return work_done;
-}
-
-static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_tx);
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
-	u32 imask;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_TX_MASK);
-
-	/* run Tx cleanup to completion */
-	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
-		gfar_clean_tx_ring(tx_queue);
-
-	napi_complete(napi);
-
-	spin_lock_irq(&gfargrp->grplock);
-	imask = gfar_read(&regs->imask);
-	imask |= IMASK_TX_DEFAULT;
-	gfar_write(&regs->imask, imask);
-	spin_unlock_irq(&gfargrp->grplock);
-
-	return 0;
-}
-
-static int gfar_poll_rx(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_rx);
-	struct gfar_private *priv = gfargrp->priv;
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	int work_done = 0, work_done_per_q = 0;
-	int i, budget_per_q = 0;
-	unsigned long rstat_rxf;
-	int num_act_queues;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_RX_MASK);
-
-	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
-
-	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
-	if (num_act_queues)
-		budget_per_q = budget/num_act_queues;
-
-	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
-		/* skip queue if not active */
-		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
-			continue;
-
-		rx_queue = priv->rx_queue[i];
-		work_done_per_q =
-			gfar_clean_rx_ring(rx_queue, budget_per_q);
-		work_done += work_done_per_q;
-
-		/* finished processing this queue */
-		if (work_done_per_q < budget_per_q) {
-			/* clear active queue hw indication */
-			gfar_write(&regs->rstat,
-				   RSTAT_CLEAR_RXF0 >> i);
-			num_act_queues--;
-
-			if (!num_act_queues)
-				break;
-		}
-	}
-
-	if (!num_act_queues) {
-		u32 imask;
-		napi_complete_done(napi, work_done);
-
-		/* Clear the halt bit in RSTAT */
-		gfar_write(&regs->rstat, gfargrp->rstat);
-
-		spin_lock_irq(&gfargrp->grplock);
-		imask = gfar_read(&regs->imask);
-		imask |= IMASK_RX_DEFAULT;
-		gfar_write(&regs->imask, imask);
-		spin_unlock_irq(&gfargrp->grplock);
-	}
-
-	return work_done;
-}
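
The multi-queue poller above spreads the NAPI budget evenly over the RX queues flagged active in RSTAT. A small user-space sketch of just that arithmetic, with __builtin_popcountl() standing in for bitmap_weight() and a plain LSB-first bitmask instead of the RSTAT_CLEAR_RXF0 >> i encoding:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rstat_rxf = 0x0b; /* example: queues 0, 1 and 3 active */
            int budget = 64;
            int num_act_queues = __builtin_popcountl(rstat_rxf);
            int budget_per_q = num_act_queues ? budget / num_act_queues : 0;

            for (int i = 0; i < 8; i++) {
                    if (!(rstat_rxf & (1UL << i)))
                            continue;       /* skip inactive queues */
                    printf("queue %d gets budget %d\n", i, budget_per_q);
            }
            return 0;
    }
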
-
-static int gfar_poll_tx(struct napi_struct *napi, int budget)
-{
-	struct gfar_priv_grp *gfargrp =
-		container_of(napi, struct gfar_priv_grp, napi_tx);
-	struct gfar_private *priv = gfargrp->priv;
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	int has_tx_work = 0;
-	int i;
-
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived
-	 */
-	gfar_write(&regs->ievent, IEVENT_TX_MASK);
-
-	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-		tx_queue = priv->tx_queue[i];
-		/* run Tx cleanup to completion */
-		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-			gfar_clean_tx_ring(tx_queue);
-			has_tx_work = 1;
-		}
-	}
-
-	if (!has_tx_work) {
-		u32 imask;
-		napi_complete(napi);
-
-		spin_lock_irq(&gfargrp->grplock);
-		imask = gfar_read(&regs->imask);
-		imask |= IMASK_TX_DEFAULT;
-		gfar_write(&regs->imask, imask);
-		spin_unlock_irq(&gfargrp->grplock);
-	}
-
-	return 0;
-}
-
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void gfar_netpoll(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	int i;
-
-	/* If the device has multiple interrupts, run tx/rx */
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		for (i = 0; i < priv->num_grps; i++) {
-			struct gfar_priv_grp *grp = &priv->gfargrp[i];
-
-			disable_irq(gfar_irq(grp, TX)->irq);
-			disable_irq(gfar_irq(grp, RX)->irq);
-			disable_irq(gfar_irq(grp, ER)->irq);
-			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
-			enable_irq(gfar_irq(grp, ER)->irq);
-			enable_irq(gfar_irq(grp, RX)->irq);
-			enable_irq(gfar_irq(grp, TX)->irq);
-		}
-	} else {
-		for (i = 0; i < priv->num_grps; i++) {
-			struct gfar_priv_grp *grp = &priv->gfargrp[i];
-
-			disable_irq(gfar_irq(grp, TX)->irq);
-			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
-			enable_irq(gfar_irq(grp, TX)->irq);
-		}
-	}
-}
-#endif
-
-/* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *gfargrp = grp_id;
-
-	/* Save ievent for future reference */
-	u32 events = gfar_read(&gfargrp->regs->ievent);
-
-	/* Check for reception */
-	if (events & IEVENT_RX_MASK)
-		gfar_receive(irq, grp_id);
-
-	/* Check for transmit completion */
-	if (events & IEVENT_TX_MASK)
-		gfar_transmit(irq, grp_id);
-
-	/* Check for errors */
-	if (events & IEVENT_ERR_MASK)
-		gfar_error(irq, grp_id);
-
-	return IRQ_HANDLED;
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state.  The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void adjust_link(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = dev->phydev;
-
-	if (unlikely(phydev->link != priv->oldlink ||
-		     (phydev->link && (phydev->duplex != priv->oldduplex ||
-				       phydev->speed != priv->oldspeed))))
-		gfar_update_link_state(priv);
-}
-
-/* Update the hash table based on the current list of multicast
- * addresses we subscribe to.  Also, change the promiscuity of
- * the device based on the flags (this function is called
- * whenever dev->flags is changed
- */
-static void gfar_set_multi(struct net_device *dev)
-{
-	struct netdev_hw_addr *ha;
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-
-	if (dev->flags & IFF_PROMISC) {
-		/* Set RCTRL to PROM */
-		tempval = gfar_read(&regs->rctrl);
-		tempval |= RCTRL_PROM;
-		gfar_write(&regs->rctrl, tempval);
-	} else {
-		/* Set RCTRL to not PROM */
-		tempval = gfar_read(&regs->rctrl);
-		tempval &= ~(RCTRL_PROM);
-		gfar_write(&regs->rctrl, tempval);
-	}
-
-	if (dev->flags & IFF_ALLMULTI) {
-		/* Set the hash to rx all multicast frames */
-		gfar_write(&regs->igaddr0, 0xffffffff);
-		gfar_write(&regs->igaddr1, 0xffffffff);
-		gfar_write(&regs->igaddr2, 0xffffffff);
-		gfar_write(&regs->igaddr3, 0xffffffff);
-		gfar_write(&regs->igaddr4, 0xffffffff);
-		gfar_write(&regs->igaddr5, 0xffffffff);
-		gfar_write(&regs->igaddr6, 0xffffffff);
-		gfar_write(&regs->igaddr7, 0xffffffff);
-		gfar_write(&regs->gaddr0, 0xffffffff);
-		gfar_write(&regs->gaddr1, 0xffffffff);
-		gfar_write(&regs->gaddr2, 0xffffffff);
-		gfar_write(&regs->gaddr3, 0xffffffff);
-		gfar_write(&regs->gaddr4, 0xffffffff);
-		gfar_write(&regs->gaddr5, 0xffffffff);
-		gfar_write(&regs->gaddr6, 0xffffffff);
-		gfar_write(&regs->gaddr7, 0xffffffff);
-	} else {
-		int em_num;
-		int idx;
-
-		/* zero out the hash */
-		gfar_write(&regs->igaddr0, 0x0);
-		gfar_write(&regs->igaddr1, 0x0);
-		gfar_write(&regs->igaddr2, 0x0);
-		gfar_write(&regs->igaddr3, 0x0);
-		gfar_write(&regs->igaddr4, 0x0);
-		gfar_write(&regs->igaddr5, 0x0);
-		gfar_write(&regs->igaddr6, 0x0);
-		gfar_write(&regs->igaddr7, 0x0);
-		gfar_write(&regs->gaddr0, 0x0);
-		gfar_write(&regs->gaddr1, 0x0);
-		gfar_write(&regs->gaddr2, 0x0);
-		gfar_write(&regs->gaddr3, 0x0);
-		gfar_write(&regs->gaddr4, 0x0);
-		gfar_write(&regs->gaddr5, 0x0);
-		gfar_write(&regs->gaddr6, 0x0);
-		gfar_write(&regs->gaddr7, 0x0);
-
-		/* If we have extended hash tables, we need to
-		 * clear the exact match registers to prepare for
-		 * setting them
-		 */
-		if (priv->extended_hash) {
-			em_num = GFAR_EM_NUM + 1;
-			gfar_clear_exact_match(dev);
-			idx = 1;
-		} else {
-			idx = 0;
-			em_num = 0;
-		}
-
-		if (netdev_mc_empty(dev))
-			return;
-
-		/* Parse the list, and set the appropriate bits */
-		netdev_for_each_mc_addr(ha, dev) {
-			if (idx < em_num) {
-				gfar_set_mac_for_addr(dev, idx, ha->addr);
-				idx++;
-			} else
-				gfar_set_hash_for_addr(dev, ha->addr);
-		}
-	}
-}
-
-
-/* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception
- */
-static void gfar_clear_exact_match(struct net_device *dev)
-{
-	int idx;
-	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-
-	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
-		gfar_set_mac_for_addr(dev, idx, zero_arr);
-}
-
-/* Set the appropriate hash bit for the given addr */
-/* The algorithm works like so:
- * 1) Take the Destination Address (ie the multicast address), and
- * do a CRC on it (little endian), and reverse the bits of the
- * result.
- * 2) Use the 8 most significant bits as a hash into a 256-entry
- * table.  The table is controlled through 8 32-bit registers:
- * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
- * entry 255.  This means that the 3 most significant bits in the
- * hash index which gaddr register to use, and the 5 other bits
- * indicate which bit (assuming an IBM numbering scheme, which
- * for PowerPC (tm) is usually the case) in the register holds
- * the entry.
- */
-static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
-{
-	u32 tempval;
-	struct gfar_private *priv = netdev_priv(dev);
-	u32 result = ether_crc(ETH_ALEN, addr);
-	int width = priv->hash_width;
-	u8 whichbit = (result >> (32 - width)) & 0x1f;
-	u8 whichreg = result >> (32 - width + 5);
-	u32 value = (1 << (31-whichbit));
-
-	tempval = gfar_read(priv->hash_regs[whichreg]);
-	tempval |= value;
-	gfar_write(priv->hash_regs[whichreg], tempval);
-}
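
The comment above describes where a hash entry lands; the sketch below runs the same index split on a made-up CRC value (the driver gets the real one from ether_crc() over the MAC address). With width = 8, the top 8 bits of the CRC select one of 256 entries: the upper 3 of those pick one of 8 group-address registers, the lower 5 pick the bit within it, counted from the register MSB.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t result = 0xd3a61200;   /* pretend ether_crc() output */
            int width = 8;                  /* 8 hash bits -> 256-entry table */

            uint8_t whichbit = (result >> (32 - width)) & 0x1f;
            uint8_t whichreg = result >> (32 - width + 5);
            uint32_t value = 1u << (31 - whichbit);

            /* entry 211 lands in register 6, bit 19 counted from the MSB */
            printf("entry %u -> register %u, bit mask 0x%08x\n",
                   result >> (32 - width), whichreg, value);
            return 0;
    }
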
-
-
-/* There are multiple MAC Address register pairs on some controllers
- * This function sets the numth pair to a given address
- */
-static void gfar_set_mac_for_addr(struct net_device *dev, int num,
-				  const u8 *addr)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 tempval;
-	u32 __iomem *macptr = &regs->macstnaddr1;
-
-	macptr += num*2;
-
-	/* For a station address of 0x12345678ABCD in transmission
-	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
-	 * MACnADDR2 is set to 0x34120000.
-	 */
-	tempval = (addr[5] << 24) | (addr[4] << 16) |
-		  (addr[3] << 8)  |  addr[2];
-
-	gfar_write(macptr, tempval);
-
-	tempval = (addr[1] << 24) | (addr[0] << 16);
-
-	gfar_write(macptr+1, tempval);
-}
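
gfar_set_mac_for_addr() above packs the six address bytes into two 32-bit register writes in reversed byte order. This stand-alone check reproduces the packing for the address used in its comment, 12:34:56:78:AB:CD, which should come out as MACnADDR1 = 0xCDAB7856 and MACnADDR2 = 0x34120000.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };

            uint32_t macnaddr1 = ((uint32_t)addr[5] << 24) |
                                 ((uint32_t)addr[4] << 16) |
                                 ((uint32_t)addr[3] << 8)  |
                                  (uint32_t)addr[2];
            uint32_t macnaddr2 = ((uint32_t)addr[1] << 24) |
                                 ((uint32_t)addr[0] << 16);

            printf("MACnADDR1 = 0x%08X\n", macnaddr1);   /* 0xCDAB7856 */
            printf("MACnADDR2 = 0x%08X\n", macnaddr2);   /* 0x34120000 */
            return 0;
    }
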
-
-/* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *grp_id)
-{
-	struct gfar_priv_grp *gfargrp = grp_id;
-	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_private *priv= gfargrp->priv;
-	struct net_device *dev = priv->ndev;
-
-	/* Save ievent for future reference */
-	u32 events = gfar_read(&regs->ievent);
-
-	/* Clear IEVENT */
-	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
-
-	/* Magic Packet is not an error. */
-	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
-	    (events & IEVENT_MAG))
-		events &= ~IEVENT_MAG;
-
-	/* Hmm... */
-	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
-		netdev_dbg(dev,
-			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
-			   events, gfar_read(&regs->imask));
-
-	/* Update the error counters */
-	if (events & IEVENT_TXE) {
-		dev->stats.tx_errors++;
-
-		if (events & IEVENT_LC)
-			dev->stats.tx_window_errors++;
-		if (events & IEVENT_CRL)
-			dev->stats.tx_aborted_errors++;
-		if (events & IEVENT_XFUN) {
-			netif_dbg(priv, tx_err, dev,
-				  "TX FIFO underrun, packet dropped\n");
-			dev->stats.tx_dropped++;
-			atomic64_inc(&priv->extra_stats.tx_underrun);
-
-			schedule_work(&priv->reset_task);
-		}
-		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
-	}
-	if (events & IEVENT_BSY) {
-		dev->stats.rx_over_errors++;
-		atomic64_inc(&priv->extra_stats.rx_bsy);
-
-		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
-			  gfar_read(&regs->rstat));
-	}
-	if (events & IEVENT_BABR) {
-		dev->stats.rx_errors++;
-		atomic64_inc(&priv->extra_stats.rx_babr);
-
-		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
-	}
-	if (events & IEVENT_EBERR) {
-		atomic64_inc(&priv->extra_stats.eberr);
-		netif_dbg(priv, rx_err, dev, "bus error\n");
-	}
-	if (events & IEVENT_RXC)
-		netif_dbg(priv, rx_status, dev, "control frame\n");
-
-	if (events & IEVENT_BABT) {
-		atomic64_inc(&priv->extra_stats.tx_babt);
-		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
-	}
-	return IRQ_HANDLED;
-}
-
-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
-{
-	struct net_device *ndev = priv->ndev;
-	struct phy_device *phydev = ndev->phydev;
-	u32 val = 0;
-
-	if (!phydev->duplex)
-		return val;
-
-	if (!priv->pause_aneg_en) {
-		if (priv->tx_pause_en)
-			val |= MACCFG1_TX_FLOW;
-		if (priv->rx_pause_en)
-			val |= MACCFG1_RX_FLOW;
-	} else {
-		u16 lcl_adv, rmt_adv;
-		u8 flowctrl;
-		/* get link partner capabilities */
-		rmt_adv = 0;
-		if (phydev->pause)
-			rmt_adv = LPA_PAUSE_CAP;
-		if (phydev->asym_pause)
-			rmt_adv |= LPA_PAUSE_ASYM;
-
-		lcl_adv = 0;
-		if (phydev->advertising & ADVERTISED_Pause)
-			lcl_adv |= ADVERTISE_PAUSE_CAP;
-		if (phydev->advertising & ADVERTISED_Asym_Pause)
-			lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
-		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-		if (flowctrl & FLOW_CTRL_TX)
-			val |= MACCFG1_TX_FLOW;
-		if (flowctrl & FLOW_CTRL_RX)
-			val |= MACCFG1_RX_FLOW;
-	}
-
-	return val;
-}
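
When pause autonegotiation is enabled, the function above resolves the final TX/RX pause setting from what both link partners advertise, via mii_resolve_flowctrl_fdx(). The sketch below is my restatement of that resolution rule (symmetric pause if both sides advertise it, otherwise asymmetric pause picks a direction); the bit values and the helper name here are illustrative, and the authoritative logic lives in include/linux/mii.h.

    #include <stdio.h>

    #define ADV_PAUSE_CAP  0x1      /* advertises symmetric pause */
    #define ADV_PAUSE_ASYM 0x2      /* advertises asymmetric pause */
    #define FC_TX          0x1
    #define FC_RX          0x2

    static unsigned int resolve_fdx_pause(unsigned int lcl, unsigned int rmt)
    {
            if (lcl & rmt & ADV_PAUSE_CAP)
                    return FC_TX | FC_RX;   /* both advertise symmetric pause */
            if (lcl & rmt & ADV_PAUSE_ASYM) {
                    if (lcl & ADV_PAUSE_CAP)
                            return FC_RX;
                    if (rmt & ADV_PAUSE_CAP)
                            return FC_TX;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int fc = resolve_fdx_pause(ADV_PAUSE_CAP | ADV_PAUSE_ASYM,
                                                ADV_PAUSE_ASYM);

            printf("tx flow %s, rx flow %s\n",
                   (fc & FC_TX) ? "on" : "off",
                   (fc & FC_RX) ? "on" : "off");
            return 0;
    }
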
-
-static noinline void gfar_update_link_state(struct gfar_private *priv)
-{
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	struct net_device *ndev = priv->ndev;
-	struct phy_device *phydev = ndev->phydev;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	int i;
-
-	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
-		return;
-
-	if (phydev->link) {
-		u32 tempval1 = gfar_read(&regs->maccfg1);
-		u32 tempval = gfar_read(&regs->maccfg2);
-		u32 ecntrl = gfar_read(&regs->ecntrl);
-		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
-
-		if (phydev->duplex != priv->oldduplex) {
-			if (!(phydev->duplex))
-				tempval &= ~(MACCFG2_FULL_DUPLEX);
-			else
-				tempval |= MACCFG2_FULL_DUPLEX;
-
-			priv->oldduplex = phydev->duplex;
-		}
-
-		if (phydev->speed != priv->oldspeed) {
-			switch (phydev->speed) {
-			case 1000:
-				tempval =
-				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
-				ecntrl &= ~(ECNTRL_R100);
-				break;
-			case 100:
-			case 10:
-				tempval =
-				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
-				/* Reduced mode distinguishes
-				 * between 10 and 100
-				 */
-				if (phydev->speed == SPEED_100)
-					ecntrl |= ECNTRL_R100;
-				else
-					ecntrl &= ~(ECNTRL_R100);
-				break;
-			default:
-				netif_warn(priv, link, priv->ndev,
-					   "Ack!  Speed (%d) is not 10/100/1000!\n",
-					   phydev->speed);
-				break;
-			}
-
-			priv->oldspeed = phydev->speed;
-		}
-
-		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-		tempval1 |= gfar_get_flowctrl_cfg(priv);
-
-		/* Turn last free buffer recording on */
-		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
-			for (i = 0; i < priv->num_rx_queues; i++) {
-				u32 bdp_dma;
-
-				rx_queue = priv->rx_queue[i];
-				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
-				gfar_write(rx_queue->rfbptr, bdp_dma);
-			}
-
-			priv->tx_actual_en = 1;
-		}
-
-		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
-			priv->tx_actual_en = 0;
-
-		gfar_write(&regs->maccfg1, tempval1);
-		gfar_write(&regs->maccfg2, tempval);
-		gfar_write(&regs->ecntrl, ecntrl);
-
-		if (!priv->oldlink)
-			priv->oldlink = 1;
-
-	} else if (priv->oldlink) {
-		priv->oldlink = 0;
-		priv->oldspeed = 0;
-		priv->oldduplex = -1;
-	}
-
-	if (netif_msg_link(priv))
-		phy_print_status(phydev);
-}
-
 static const struct of_device_id gfar_match[] =
 {
 	{
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 8e42c02..f472a6d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * drivers/net/ethernet/freescale/gianfar.h
  *
@@ -11,11 +12,6 @@
  *
  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
  *  Still left to do:
  *      -Add support for module parameters
  *	-Add patch for ethtool phys id
@@ -71,8 +67,6 @@
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-#define PHY_INIT_TIMEOUT 100000
-
 #define DRV_NAME "gfar-enet"
 extern const char gfar_driver_version[];
 
@@ -92,10 +86,6 @@
 #define GFAR_RX_MAX_RING_SIZE   256
 #define GFAR_TX_MAX_RING_SIZE   256
 
-#define GFAR_MAX_FIFO_THRESHOLD 511
-#define GFAR_MAX_FIFO_STARVE	511
-#define GFAR_MAX_FIFO_STARVE_OFF 511
-
 #define FBTHR_SHIFT        24
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
@@ -113,9 +103,6 @@
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
 #define DEFAULT_FIFO_TX_STARVE_OFF 0x80
-#define DEFAULT_BD_STASH 1
-#define DEFAULT_STASH_LENGTH	96
-#define DEFAULT_STASH_INDEX	0
 
 /* The number of Exact Match registers */
 #define GFAR_EM_NUM	15
@@ -143,15 +130,6 @@
 #define DEFAULT_RX_COALESCE 0
 #define DEFAULT_RXCOUNT	0
 
-#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
-		| SUPPORTED_10baseT_Full \
-		| SUPPORTED_100baseT_Half \
-		| SUPPORTED_100baseT_Full \
-		| SUPPORTED_Autoneg \
-		| SUPPORTED_MII)
-
-#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
-
 /* TBI register addresses */
 #define MII_TBICON		0x11
 
@@ -189,8 +167,6 @@
 #define ECNTRL_REDUCED_MII_MODE	0x00000004
 #define ECNTRL_SGMII_MODE	0x00000002
 
-#define MRBLR_INIT_SETTINGS	DEFAULT_RX_BUFFER_SIZE
-
 #define MINFLR_INIT_SETTINGS	0x00000040
 
 /* Tqueue control */
@@ -270,12 +246,6 @@
 #define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
 #define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
 
-#define skip_bd(bdp, stride, base, ring_size) ({ \
-	typeof(bdp) new_bd = (bdp) + (stride); \
-	(new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; })
-
-#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
-
 #define RCTRL_TS_ENABLE 	0x01000000
 #define RCTRL_PAL_MASK		0x001f0000
 #define RCTRL_LFC		0x00004000
@@ -389,11 +359,6 @@
 #define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
 #define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
 
-/* Fifo management */
-#define FIFO_TX_THR_MASK	0x01ff
-#define FIFO_TX_STARVE_MASK	0x01ff
-#define FIFO_TX_STARVE_OFF_MASK	0x01ff
-
 /* Attribute fields */
 
 /* This enables rx snooping for buffers and descriptors */
@@ -1330,16 +1295,9 @@
 	return bdp_dma;
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
-void reset_gfar(struct net_device *dev);
 void gfar_mac_reset(struct gfar_private *priv);
-void gfar_halt(struct gfar_private *priv);
-void gfar_start(struct gfar_private *priv);
-void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
-		   u32 regnum, u32 read);
-void gfar_configure_coalescing_all(struct gfar_private *priv);
 int gfar_set_features(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
@@ -1352,13 +1310,6 @@
 #define RQFCR_PID_PORT_MASK 0xFFFF0000
 #define RQFCR_PID_MAC_MASK 0xFF000000
 
-struct gfar_mask_entry {
-	unsigned int mask; /* The mask value which is valid form start to end */
-	unsigned int start;
-	unsigned int end;
-	unsigned int block; /* Same block values indicate depended entries */
-};
-
 /* Represents a receive filer table entry */
 struct gfar_filer_entry {
 	u32 ctrl;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 395a526..3c8e4e2 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  drivers/net/ethernet/freescale/gianfar_ethtool.c
  *
@@ -10,10 +11,6 @@
  *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
  *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
- *
- *  This software may be used and distributed according to
- *  the terms of the GNU Public License, Version 2, incorporated herein
- *  by reference.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -48,19 +45,6 @@
 
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
-static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
-			    u64 *buf);
-static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
-static int gfar_gcoalesce(struct net_device *dev,
-			  struct ethtool_coalesce *cvals);
-static int gfar_scoalesce(struct net_device *dev,
-			  struct ethtool_coalesce *cvals);
-static void gfar_gringparam(struct net_device *dev,
-			    struct ethtool_ringparam *rvals);
-static int gfar_sringparam(struct net_device *dev,
-			   struct ethtool_ringparam *rvals);
-static void gfar_gdrvinfo(struct net_device *dev,
-			  struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
 	/* extra stats */
@@ -230,7 +214,7 @@
 
 	/* Make sure we return a number greater than 0
 	 * if usecs > 0 */
-	return (usecs * 1000 + count - 1) / count;
+	return DIV_ROUND_UP(usecs * 1000, count);
 }
 
 /* Convert ethernet clock ticks to microseconds */
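
The DIV_ROUND_UP() change above is intended to be behavior-preserving: the kernel macro expands to (((n) + (d) - 1) / (d)), i.e. exactly the open-coded rounding it replaces in gfar_usecs2ticks(). A quick check with made-up divisor values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int counts[] = { 3, 8, 24 };   /* example clock divisors */
            unsigned int usecs = 7;                 /* example coalescing value */

            for (int i = 0; i < 3; i++) {
                    unsigned int count = counts[i];
                    unsigned int old_way = (usecs * 1000 + count - 1) / count;
                    unsigned int new_way = DIV_ROUND_UP(usecs * 1000, count);

                    printf("count=%2u: old=%u new=%u\n", count, old_way, new_way);
            }
            return 0;
    }
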
@@ -503,65 +487,44 @@
 	struct gfar_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = dev->phydev;
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	u32 oldadv, newadv;
 
 	if (!phydev)
 		return -ENODEV;
 
-	if (!(phydev->supported & SUPPORTED_Pause) ||
-	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
-	     (epause->rx_pause != epause->tx_pause)))
+	if (!phy_validate_pause(phydev, epause))
 		return -EINVAL;
 
 	priv->rx_pause_en = priv->tx_pause_en = 0;
+	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
 	if (epause->rx_pause) {
 		priv->rx_pause_en = 1;
 
 		if (epause->tx_pause) {
 			priv->tx_pause_en = 1;
-			/* FLOW_CTRL_RX & TX */
-			newadv = ADVERTISED_Pause;
-		} else  /* FLOW_CTLR_RX */
-			newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+		}
 	} else if (epause->tx_pause) {
 		priv->tx_pause_en = 1;
-		/* FLOW_CTLR_TX */
-		newadv = ADVERTISED_Asym_Pause;
-	} else
-		newadv = 0;
+	}
 
 	if (epause->autoneg)
 		priv->pause_aneg_en = 1;
 	else
 		priv->pause_aneg_en = 0;
 
-	oldadv = phydev->advertising &
-		(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-	if (oldadv != newadv) {
-		phydev->advertising &=
-			~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-		phydev->advertising |= newadv;
-		if (phydev->autoneg)
-			/* inform link partner of our
-			 * new flow ctrl settings
-			 */
-			return phy_start_aneg(phydev);
+	if (!epause->autoneg) {
+		u32 tempval = gfar_read(&regs->maccfg1);
 
-		if (!epause->autoneg) {
-			u32 tempval;
-			tempval = gfar_read(&regs->maccfg1);
-			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+		tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
 
-			priv->tx_actual_en = 0;
-			if (priv->tx_pause_en) {
-				priv->tx_actual_en = 1;
-				tempval |= MACCFG1_TX_FLOW;
-			}
-
-			if (priv->rx_pause_en)
-				tempval |= MACCFG1_RX_FLOW;
-			gfar_write(&regs->maccfg1, tempval);
+		priv->tx_actual_en = 0;
+		if (priv->tx_pause_en) {
+			priv->tx_actual_en = 1;
+			tempval |= MACCFG1_TX_FLOW;
 		}
+
+		if (priv->rx_pause_en)
+			tempval |= MACCFG1_RX_FLOW;
+		gfar_write(&regs->maccfg1, tempval);
 	}
 
 	return 0;
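
The open-coded check removed above rejected a pause request unless the PHY reports Pause support, and additionally Asym_Pause whenever rx_pause != tx_pause; phy_validate_pause() is expected to enforce an equivalent constraint (see drivers/net/phy for its exact behaviour). A small sketch of that predicate:

    #include <stdbool.h>
    #include <stdio.h>

    static bool pause_request_ok(bool supports_pause, bool supports_asym,
                                 bool rx_pause, bool tx_pause)
    {
            if (!supports_pause)
                    return false;           /* no pause support at all */
            if (!supports_asym && rx_pause != tx_pause)
                    return false;           /* asymmetric needs Asym_Pause */
            return true;
    }

    int main(void)
    {
            printf("sym-only PHY, rx=1 tx=0     -> %s\n",
                   pause_request_ok(true, false, true, false) ? "ok" : "rejected");
            printf("asym-capable PHY, rx=1 tx=0 -> %s\n",
                   pause_request_ok(true, true, true, false) ? "ok" : "rejected");
            return 0;
    }
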
@@ -1155,11 +1118,9 @@
 		prio = vlan_tci_prio(rule);
 		prio_mask = vlan_tci_priom(rule);
 
-		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
-			vlan |= RQFPR_CFI;
-			vlan_mask |= RQFPR_CFI;
-		} else if (cfi != VLAN_TAG_PRESENT &&
-			   cfi_mask == VLAN_TAG_PRESENT) {
+		if (cfi_mask) {
+			if (cfi)
+				vlan |= RQFPR_CFI;
 			vlan_mask |= RQFPR_CFI;
 		}
 	}
@@ -1515,7 +1476,7 @@
 	struct gfar_private *priv = netdev_priv(dev);
 	struct platform_device *ptp_dev;
 	struct device_node *ptp_node;
-	struct qoriq_ptp *ptp = NULL;
+	struct ptp_qoriq *ptp = NULL;
 
 	info->phc_index = -1;
 
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 22a817d..f839fa9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved.
  *
@@ -6,11 +7,6 @@
  *
  * Description:
  * QE UCC Gigabit Ethernet Driver
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -30,6 +26,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/workqueue.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -1742,17 +1739,7 @@
 	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
 		uec_configure_serdes(dev);
 
-	phydev->supported &= (SUPPORTED_MII |
-			      SUPPORTED_Autoneg |
-			      ADVERTISED_10baseT_Half |
-			      ADVERTISED_10baseT_Full |
-			      ADVERTISED_100baseT_Half |
-			      ADVERTISED_100baseT_Full);
-
-	if (priv->max_speed == SPEED_1000)
-		phydev->supported |= ADVERTISED_1000baseT_Full;
-
-	phydev->advertising = phydev->supported;
+	phy_set_max_speed(phydev, priv->max_speed);
 
 	priv->phydev = phydev;
 
@@ -1888,6 +1875,8 @@
 	u16 i, j;
 	u8 __iomem *bd;
 
+	netdev_reset_queue(ugeth->ndev);
+
 	ug_info = ugeth->ug_info;
 	uf_info = &ug_info->uf_info;
 
@@ -3083,7 +3072,8 @@
 
 /* This is called by the kernel when a frame is ready for transmission. */
 /* It is pointed to by the dev->hard_start_xmit function pointer */
-static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 #ifdef CONFIG_UGETH_TX_ON_DEMAND
@@ -3685,6 +3675,7 @@
 	.ndo_stop		= ucc_geth_close,
 	.ndo_start_xmit		= ucc_geth_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_carrier     = fixed_phy_change_carrier,
 	.ndo_set_mac_address	= ucc_geth_set_mac_addr,
 	.ndo_set_rx_mode	= ucc_geth_set_multi,
 	.ndo_tx_timeout		= ucc_geth_timeout,
@@ -3915,8 +3906,8 @@
 	}
 
 	mac_addr = of_get_mac_address(np);
-	if (mac_addr)
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(dev->dev_addr, mac_addr);
 
 	ugeth->ug_info = ug_info;
 	ugeth->dev = device;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 5da19b4..a86a421 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Copyright (C) Freescale Semicondutor, Inc. 2006-2009. All rights reserved.
  *
@@ -9,11 +10,6 @@
  * Changelog:
  * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
  * - Rearrange code and style fixes
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
  */
 #ifndef __UCC_GETH_H__
 #define __UCC_GETH_H__
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 0beee2c..dfebacf 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
  *
@@ -8,11 +9,6 @@
  * Limitation:
  * Can only get/set settings of the first queue.
  * Need to re-open the interface manually after changing some parameters.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
  */
 
 #include <linux/kernel.h>
@@ -252,14 +248,12 @@
 		return -EINVAL;
 	}
 
+	if (netif_running(netdev))
+		return -EBUSY;
+
 	ug_info->bdRingLenRx[queue] = ring->rx_pending;
 	ug_info->bdRingLenTx[queue] = ring->tx_pending;
 
-	if (netif_running(netdev)) {
-		/* FIXME: restart automatically */
-		netdev_info(netdev, "Please re-open the interface\n");
-	}
-
 	return ret;
 }